agents: extract run_and_apply, eliminate dead split-plan.md

- Add run_and_apply() — combines run_one_agent + action application
  into one call. Used by daemon job_consolidation_agent and
  consolidate_full, which had identical run+apply loops.

- Port split_plan_prompt() to use split.agent via defs::resolve_placeholders
  instead of loading the separate split-plan.md template. Make
  resolve_placeholders public for this.

- Delete prompts/split-plan.md — superseded by agents/split.agent
  which was already the canonical definition.
This commit is contained in:
ProofOfConcept 2026-03-10 17:51:32 -04:00
parent abab85d249
commit 945865f594
6 changed files with 35 additions and 127 deletions

View file

@ -83,7 +83,7 @@ pub fn consolidate_full_with_progress(
*store = Store::load()?; *store = Store::load()?;
} }
let result = match knowledge::run_one_agent(store, agent_type, *count, "consolidate") { let (total, applied) = match knowledge::run_and_apply(store, agent_type, *count, "consolidate") {
Ok(r) => r, Ok(r) => r,
Err(e) => { Err(e) => {
let msg = format!(" ERROR: {}", e); let msg = format!(" ERROR: {}", e);
@ -93,19 +93,10 @@ pub fn consolidate_full_with_progress(
continue; continue;
} }
}; };
total_actions += total;
let ts = store::compact_timestamp();
let mut applied = 0;
for action in &result.actions {
if knowledge::apply_action(store, action, agent_type, &ts, 0) {
applied += 1;
}
}
total_actions += result.actions.len();
total_applied += applied; total_applied += applied;
let msg = format!(" Done: {} actions ({} applied, {} no-ops)", let msg = format!(" Done: {} actions ({} applied)", total, applied);
result.actions.len(), applied, result.no_ops);
log_line(&mut log_buf, &msg); log_line(&mut log_buf, &msg);
on_progress(&msg); on_progress(&msg);
println!("{}", msg); println!("{}", msg);
@ -246,7 +237,7 @@ pub fn apply_consolidation(store: &mut Store, do_apply: bool, report_key: Option
return Ok(()); return Ok(());
} }
let ts = store::format_datetime(store::now_epoch()).replace([':', '-', 'T'], ""); let ts = store::compact_timestamp();
let mut applied = 0; let mut applied = 0;
for action in &all_actions { for action in &all_actions {
if knowledge::apply_action(store, action, "consolidate", &ts, 0) { if knowledge::apply_action(store, action, "consolidate", &ts, 0) {

View file

@ -118,7 +118,6 @@ fn job_fact_mine(ctx: &ExecutionContext, path: &str) -> Result<(), TaskError> {
} }
/// Run a single consolidation agent (replay, linker, separator, transfer, health). /// Run a single consolidation agent (replay, linker, separator, transfer, health).
/// Builds prompt, calls Sonnet, stores report node in the store.
fn job_consolidation_agent( fn job_consolidation_agent(
ctx: &ExecutionContext, ctx: &ExecutionContext,
agent_type: &str, agent_type: &str,
@ -129,19 +128,9 @@ fn job_consolidation_agent(
run_job(ctx, &format!("c-{}", agent), || { run_job(ctx, &format!("c-{}", agent), || {
ctx.log_line("loading store"); ctx.log_line("loading store");
let mut store = crate::store::Store::load()?; let mut store = crate::store::Store::load()?;
ctx.log_line(&format!("running agent: {} (batch={})", agent, batch)); ctx.log_line(&format!("running agent: {} (batch={})", agent, batch));
let result = super::knowledge::run_one_agent(&mut store, &agent, batch, "consolidate")?; let (total, applied) = super::knowledge::run_and_apply(&mut store, &agent, batch, "consolidate")?;
ctx.log_line(&format!("done: {} actions ({} applied)", total, applied));
let ts = crate::store::compact_timestamp();
let mut applied = 0;
for action in &result.actions {
if super::knowledge::apply_action(&mut store, action, &agent, &ts, 0) {
applied += 1;
}
}
ctx.log_line(&format!("done: {} actions ({} applied)", result.actions.len(), applied));
Ok(()) Ok(())
}) })
} }

View file

@ -184,7 +184,7 @@ fn resolve(
/// Resolve all {{placeholder}} patterns in a prompt template. /// Resolve all {{placeholder}} patterns in a prompt template.
/// Returns the resolved text and all node keys collected from placeholders. /// Returns the resolved text and all node keys collected from placeholders.
fn resolve_placeholders( pub fn resolve_placeholders(
template: &str, template: &str,
store: &Store, store: &Store,
graph: &Graph, graph: &Graph,

View file

@ -332,6 +332,26 @@ pub struct AgentResult {
pub node_keys: Vec<String>, pub node_keys: Vec<String>,
} }
/// Run a single agent and apply its actions (no depth tracking).
///
/// Returns (total_actions, applied_count) or an error.
pub fn run_and_apply(
    store: &mut Store,
    agent_name: &str,
    batch_size: usize,
    llm_tag: &str,
) -> Result<(usize, usize), String> {
    // Run the full agent pipeline first; any failure propagates immediately.
    let outcome = run_one_agent(store, agent_name, batch_size, llm_tag)?;
    let stamp = store::compact_timestamp();
    // Apply each parsed action, counting how many actually mutated the store
    // (apply_action returns false for no-ops).
    let mut succeeded = 0usize;
    for act in &outcome.actions {
        if apply_action(store, act, agent_name, &stamp, 0) {
            succeeded += 1;
        }
    }
    Ok((outcome.actions.len(), succeeded))
}
/// Run a single agent: build prompt → call LLM → store output → parse actions → record visits. /// Run a single agent: build prompt → call LLM → store output → parse actions → record visits.
/// ///
/// This is the common pipeline shared by the knowledge loop, consolidation pipeline, /// This is the common pipeline shared by the knowledge loop, consolidation pipeline,

View file

@ -366,12 +366,16 @@ pub fn format_split_plan_node(store: &Store, graph: &Graph, key: &str) -> String
out out
} }
/// Build split-plan prompt for a single node (phase 1) /// Build split-plan prompt for a single node (phase 1).
/// Uses the split.agent template with placeholders resolved for the given key.
pub fn split_plan_prompt(store: &Store, key: &str) -> Result<String, String> { pub fn split_plan_prompt(store: &Store, key: &str) -> Result<String, String> {
let def = super::defs::get_def("split")
.ok_or_else(|| "no split.agent file".to_string())?;
let graph = store.build_graph(); let graph = store.build_graph();
let topology = format_topology_header(&graph); // Override the query — we have a specific key to split
let node_section = format_split_plan_node(store, &graph, key); let keys = vec![key.to_string()];
load_prompt("split-plan", &[("{{TOPOLOGY}}", &topology), ("{{NODE}}", &node_section)]) let (prompt, _) = super::defs::resolve_placeholders(&def.prompt, store, &graph, &keys, 1);
Ok(prompt)
} }
/// Build split-extract prompt for one child (phase 2) /// Build split-extract prompt for one child (phase 2)

View file

@ -1,96 +0,0 @@
# Split Agent — Phase 1: Plan
You are a memory consolidation agent planning how to split an overgrown
node into focused, single-topic children.
## What you're doing
This node has grown to cover multiple distinct topics. Your job is to
identify the natural topic boundaries and propose a split plan. You are
NOT writing the content — a second phase will extract each child's
content separately.
## How to find split points
The node is shown with its **neighbor list grouped by community**. The
neighbors tell you what topics the node covers:
- If a node links to neighbors in 3 different communities, it likely
covers 3 different topics
- Content that relates to one neighbor cluster should go in one child;
content relating to another cluster goes in another child
- The community structure is your primary guide — don't just split by
sections or headings, split by **semantic topic**
## When NOT to split
- **Episodes that belong in sequence.** If a node tells a story — a
conversation that unfolded over time, a debugging session, an evening
together — don't break the narrative. Sequential events that form a
coherent arc should stay together even if they touch multiple topics.
The test: would reading one child without the others lose important
context about *what happened*?
## What to output
Output a JSON block describing the split plan:
```json
{
"action": "split",
"parent": "original-key",
"children": [
{
"key": "new-key-1",
"description": "Brief description of what this child covers",
"sections": ["Section Header 1", "Section Header 2"],
"neighbors": ["neighbor-key-a", "neighbor-key-b"]
},
{
"key": "new-key-2",
"description": "Brief description of what this child covers",
"sections": ["Section Header 3", "Another Section"],
"neighbors": ["neighbor-key-c"]
}
]
}
```
If the node should NOT be split:
```json
{
"action": "keep",
"parent": "original-key",
"reason": "Why this node is cohesive despite its size"
}
```
## Naming children
- Use descriptive kebab-case keys: `topic-subtopic`
- If the parent was `foo`, children might be `foo-technical`, `foo-personal`
- Keep names short (3-5 words max)
- Preserve any date prefixes from the parent key
## Section hints
The "sections" field is a guide for the extraction phase — list the
section headers or topic areas from the original content that belong
in each child. These don't need to be exact matches; they're hints
that help the extractor know what to include. Content that spans topics
or doesn't have a clear header can be mentioned in the description.
## Neighbor assignment
The "neighbors" field assigns the parent's graph edges to each child.
Look at the neighbor list — each neighbor should go to whichever child
is most semantically related. A neighbor can appear in multiple children
if it's relevant to both. Every neighbor should be assigned to at least
one child so no graph connections are lost.
{{TOPOLOGY}}
## Node to review
{{NODE}}