agents: multi-step agent support

Split agent prompts on the === PROMPT === delimiter. Each step runs
as a new user message in the same LLM conversation, so context carries
forward naturally between steps. Single-step agents are unchanged.

- AgentDef.prompt -> AgentDef.prompts: Vec<String>
- AgentBatch.prompt -> AgentBatch.prompts: Vec<String>
- API layer injects next prompt after each text response
- {{conversation:N}} parameterized byte budget for conversation context

Co-Authored-By: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
ProofOfConcept 2026-03-26 14:21:43 -04:00
parent baf208281d
commit 77d1d39f3f
6 changed files with 166 additions and 65 deletions

View file

@ -13,7 +13,8 @@ use crate::neuro::{
/// and the keys of nodes selected for processing, so the caller can
/// record visits after successful completion.
pub struct AgentBatch {
pub prompt: String,
/// Prompt steps — single-step agents have one entry, multi-step have several.
pub prompts: Vec<String>,
pub node_keys: Vec<String>,
}
@ -363,7 +364,8 @@ pub fn split_plan_prompt(store: &Store, key: &str) -> Result<String, String> {
let graph = store.build_graph();
// Override the query — we have a specific key to split
let keys = vec![key.to_string()];
let (prompt, _) = super::defs::resolve_placeholders(&def.prompt, store, &graph, &keys, 1);
let template = def.prompts.first().ok_or_else(|| "split.agent has no prompts".to_string())?;
let (prompt, _) = super::defs::resolve_placeholders(template, store, &graph, &keys, 1);
Ok(prompt)
}
@ -384,7 +386,12 @@ pub fn split_extract_prompt(store: &Store, parent_key: &str, child_key: &str, ch
pub fn consolidation_batch(store: &Store, count: usize, auto: bool) -> Result<(), String> {
if auto {
let batch = agent_prompt(store, "replay", count)?;
println!("{}", batch.prompt);
for (i, p) in batch.prompts.iter().enumerate() {
if batch.prompts.len() > 1 {
println!("=== STEP {} ===\n", i + 1);
}
println!("{}", p);
}
return Ok(());
}