agents: multi-step agent support

Split agent prompts on === PROMPT === delimiter. Each step runs as
a new user message in the same LLM conversation, so context carries
forward naturally between steps. Single-step agents are unchanged.

- AgentDef.prompt -> AgentDef.prompts: Vec<String>
- AgentBatch.prompt -> AgentBatch.prompts: Vec<String>
- API layer injects next prompt after each text response
- {{conversation:N}} parameterized byte budget for conversation context

Co-Authored-By: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
ProofOfConcept 2026-03-26 14:21:43 -04:00
parent baf208281d
commit 77d1d39f3f
6 changed files with 166 additions and 65 deletions

View file

@@ -21,16 +21,17 @@ pub(crate) fn call_simple(caller: &str, prompt: &str) -> Result<String, String>
}
};
super::api::call_api_with_tools_sync(caller, prompt, None, &log)
let prompts = vec![prompt.to_string()];
super::api::call_api_with_tools_sync(caller, &prompts, None, &log)
}
/// Call a model using an agent definition's configuration.
pub(crate) fn call_for_def(
/// Call a model using an agent definition's configuration (multi-step).
pub(crate) fn call_for_def_multi(
def: &super::defs::AgentDef,
prompt: &str,
prompts: &[String],
log: &(dyn Fn(&str) + Sync),
) -> Result<String, String> {
super::api::call_api_with_tools_sync(&def.agent, prompt, def.temperature, log)
super::api::call_api_with_tools_sync(&def.agent, prompts, def.temperature, log)
}
/// Parse a JSON response, handling markdown fences.