agents: multi-step agent support
Split agent prompts on the `=== PROMPT ===` delimiter. Each step runs as
a new user message in the same LLM conversation, so context carries
forward naturally between steps. Single-step agents are unchanged.
- AgentDef.prompt -> AgentDef.prompts: Vec<String>
- AgentBatch.prompt -> AgentBatch.prompts: Vec<String>
- API layer injects next prompt after each text response
- {{conversation:N}} parameterized byte budget for conversation context
Co-Authored-By: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
baf208281d
commit
77d1d39f3f
6 changed files with 166 additions and 65 deletions
|
|
@@ -226,7 +226,7 @@ fn generate_digest(
     // Load prompt from agent file; fall back to prompts dir
     let def = super::defs::get_def("digest");
     let template = match &def {
-        Some(d) => d.prompt.clone(),
+        Some(d) => d.prompts.first().cloned().unwrap_or_default(),
         None => {
             let path = crate::config::get().prompts_dir.join("digest.md");
             std::fs::read_to_string(&path)
Loading…
Add table
Add a link
Reference in a new issue