agents: multi-step agent support
Split agent prompts on the === PROMPT === delimiter. Each step runs as
a new user message in the same LLM conversation, so context carries
forward naturally between steps. Single-step agents are unchanged.
- AgentDef.prompt -> AgentDef.prompts: Vec<String>
- AgentBatch.prompt -> AgentBatch.prompts: Vec<String>
- API layer injects the next prompt after each text response
- {{conversation:N}}: parameterized byte budget for conversation context
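
For example, a two-step .agent file might look like this (illustrative
only: the header fields are assumed to mirror AgentDef, and model falls
back to the "sonnet" default when omitted):

    {"agent": "journal", "query": "recent", "schedule": "daily", "tools": []}

    Summarize the recent conversation, capped at roughly 50KB of context:
    {{conversation:50000}}

    === PROMPT ===

    Now write a journal entry based on your summary above.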
Co-Authored-By: Kent Overstreet <kent.overstreet@linux.dev>
parent baf208281d
commit 77d1d39f3f

6 changed files with 166 additions and 65 deletions
@@ -29,7 +29,9 @@ use std::path::PathBuf;
 pub struct AgentDef {
     pub agent: String,
     pub query: String,
-    pub prompt: String,
+    /// Prompt steps — single-step agents have one entry, multi-step have several.
+    /// Steps are separated by `=== PROMPT ===` in the .agent file.
+    pub prompts: Vec<String>,
     pub model: String,
     pub schedule: String,
     pub tools: Vec<String>,
@@ -67,16 +69,29 @@ struct AgentHeader {
 
 fn default_model() -> String { "sonnet".into() }
 
-/// Parse an agent file: first line is JSON config, rest is the prompt.
+/// Parse an agent file: first line is JSON config, rest is the prompt(s).
+/// Multiple prompts are separated by `=== PROMPT ===` lines.
 fn parse_agent_file(content: &str) -> Option<AgentDef> {
     let (first_line, rest) = content.split_once('\n')?;
     let header: AgentHeader = serde_json::from_str(first_line.trim()).ok()?;
     // Skip optional blank line between header and prompt body
-    let prompt = rest.strip_prefix('\n').unwrap_or(rest);
+    let body = rest.strip_prefix('\n').unwrap_or(rest);
+
+    // Split on === PROMPT === delimiter for multi-step agents
+    let prompts: Vec<String> = body
+        .split("\n=== PROMPT ===\n")
+        .map(|s| s.trim().to_string())
+        .filter(|s| !s.is_empty())
+        .collect();
+
+    if prompts.is_empty() {
+        return None;
+    }
+
     Some(AgentDef {
         agent: header.agent,
         query: header.query,
-        prompt: prompt.to_string(),
+        prompts,
         model: header.model,
         schedule: header.schedule,
         tools: header.tools,
@@ -253,7 +268,7 @@ fn resolve(
             result_keys.push(key.clone());
         }
 
-        text.push_str("Use `poc-memory render KEY` and `poc-memory query \"neighbors('KEY')\"` to explore further.\n");
+        text.push_str("Use memory_render(KEY) and memory_links(KEY) to explore further.\n");
 
         Some(Resolved { text, keys: result_keys })
     }
@@ -445,9 +460,25 @@ fn resolve(
             })
         }
 
+        // input:KEY — read a named output file from the agent's output dir
+        _ if name.starts_with("input:") => {
+            let key = &name[6..];
+            let dir = std::env::var("POC_AGENT_OUTPUT_DIR")
+                .map(std::path::PathBuf::from)
+                .unwrap_or_else(|_| crate::store::memory_dir().join("agent-output").join("default"));
+            let path = dir.join(key);
+            match std::fs::read_to_string(&path) {
+                Ok(text) => Some(Resolved { text, keys: vec![] }),
+                Err(_) => Some(Resolved { text: String::new(), keys: vec![] }),
+            }
+        }
+
         // conversation — tail of the current session transcript (post-compaction)
-        "conversation" => {
-            let text = resolve_conversation();
+        // conversation:N — same, but with an explicit byte budget
+        _ if name == "conversation" || name.starts_with("conversation:") => {
+            let max_bytes = name.strip_prefix("conversation:")
+                .and_then(|s| s.parse::<usize>().ok());
+            let text = resolve_conversation(max_bytes);
             if text.is_empty() { None }
             else { Some(Resolved { text, keys: vec![] }) }
         }
@@ -470,6 +501,24 @@ fn resolve(
             Some(Resolved { text, keys: vec![] })
         }
 
+        // latest_journal — the most recent journal entry for the journal agent
+        "latest_journal" => {
+            let text = store.nodes.get("journal")
+                .map(|n| {
+                    // Get the last entry (last ## section)
+                    let content = &n.content;
+                    content.rfind("\n## ")
+                        .map(|pos| content[pos..].to_string())
+                        .unwrap_or_else(|| {
+                            // Take the last 2000 chars if no ## found
+                            let start = content.len().saturating_sub(2000);
+                            content[start..].to_string()
+                        })
+                })
+                .unwrap_or_else(|| "(no previous journal entry)".to_string());
+            Some(Resolved { text, keys: vec!["journal".to_string()] })
+        }
+
         _ => None,
     }
 }
@@ -477,7 +526,7 @@ fn resolve(
 /// Get the tail of the current session's conversation.
 /// Reads POC_SESSION_ID to find the transcript, extracts the last
 /// segment (post-compaction), returns the tail (~100K chars).
-fn resolve_conversation() -> String {
+fn resolve_conversation(budget: Option<usize>) -> String {
     let session_id = std::env::var("POC_SESSION_ID").unwrap_or_default();
     if session_id.is_empty() { return String::new(); }
 
@@ -502,12 +551,12 @@ fn resolve_conversation() -> String {
     };
 
     let cfg = crate::config::get();
+    let max_bytes = budget.unwrap_or_else(|| cfg.surface_conversation_bytes.unwrap_or(100_000));
     let mut fragments: Vec<String> = Vec::new();
     let mut total_bytes = 0;
-    const MAX_BYTES: usize = 200_000;
 
     for (role, content, ts) in iter {
-        if total_bytes >= MAX_BYTES { break; }
+        if total_bytes >= max_bytes { break; }
         let name = if role == "user" { &cfg.user_name } else { &cfg.assistant_name };
         let formatted = if !ts.is_empty() {
             format!("**{}** {}: {}", name, &ts[..ts.len().min(19)], content)
@@ -695,19 +744,18 @@ pub fn run_agent(
         vec![]
     };
 
-    // Substitute {agent_name} before resolving {{...}} placeholders,
-    // so agents can reference their own notes: {{node:subconscious-notes-{agent_name}}}
-    let template = def.prompt.replace("{agent_name}", &def.agent);
-    let (prompt, extra_keys) = resolve_placeholders(&template, store, &graph, &keys, count);
-
-    // Identity and instructions are now pulled in via {{node:KEY}} placeholders.
-    // Agents should include {{node:core-personality}} and {{node:memory-instructions-core}}
-    // in their prompt templates. The resolve_placeholders call below handles this.
-
-    // Merge query keys with any keys produced by placeholder resolution
+    // Resolve placeholders for all prompts. The conversation context
+    // carries forward between steps naturally via the LLM's message history.
     let mut all_keys = keys;
-    all_keys.extend(extra_keys);
-    Ok(super::prompts::AgentBatch { prompt, node_keys: all_keys })
+    let mut prompts = Vec::new();
+    for prompt_template in &def.prompts {
+        let template = prompt_template.replace("{agent_name}", &def.agent);
+        let (prompt, extra_keys) = resolve_placeholders(&template, store, &graph, &all_keys, count);
+        all_keys.extend(extra_keys);
+        prompts.push(prompt);
+    }
+
+    Ok(super::prompts::AgentBatch { prompts, node_keys: all_keys })
 }
 
 /// Convert a list of keys to ReplayItems with priority and graph metrics.
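
The "API layer injects the next prompt after each text response" piece
is not visible in the hunks above. A minimal sketch of what that loop
looks like, with a hypothetical Llm trait standing in for the real
conversation handle (not this repo's actual API):

    // Hypothetical trait; the real conversation handle lives in the API layer.
    trait Llm {
        fn push_user(&mut self, text: &str);
        fn complete(&mut self) -> String; // returns the model's text response
    }

    // Drive a multi-step AgentBatch: each prompt becomes a new user message
    // in the same conversation, so step N+1 sees the replies to steps 1..N.
    fn drive_steps(llm: &mut impl Llm, prompts: &[String]) -> Vec<String> {
        let mut replies = Vec::new();
        for prompt in prompts {
            llm.push_user(prompt);
            replies.push(llm.complete());
        }
        replies
    }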