Shared subconscious state — walked keys are Mind-level, not per-agent

SubconsciousSharedState holds the walked keys shared between all
subconscious agents. This enables splitting the surface-observe agent
into separate surface and observe agents that share the same walked state.

The walked list is now passed to run_forked() at run time instead of
living on AutoAgent. The UI shows the walked count in the subconscious
screen header.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-07 02:13:06 -04:00
parent ef868cb98f
commit f3ba7e7097
4 changed files with 37 additions and 23 deletions

View file

@ -54,8 +54,6 @@ pub struct AutoAgent {
pub steps: Vec<AutoStep>,
sampling: super::api::SamplingParams,
priority: i32,
/// Memory keys the surface agent was exploring — persists between runs.
pub walked: Vec<String>,
/// Named outputs from the agent's output() tool calls.
/// Collected per-run, read by Mind after completion.
pub outputs: std::collections::HashMap<String, String>,
@ -164,7 +162,6 @@ impl AutoAgent {
temperature, top_p: 0.95, top_k: 20,
},
priority,
walked: Vec::new(),
outputs: std::collections::HashMap::new(),
last_run_entries: Vec::new(),
current_phase: String::new(),
@ -186,18 +183,18 @@ impl AutoAgent {
}
/// Run forked from a conscious agent's context. Each call gets a
/// fresh fork for KV cache sharing. Walked state persists between runs.
/// fresh fork for KV cache sharing.
///
/// `memory_keys`: keys of Memory entries in the conscious agent's
/// context, used to resolve {{seen_current}} in prompt templates.
/// `memory_keys`: Memory entry keys from conscious context (for {{seen_current}}).
/// `walked`: shared walked keys from previous runs (for {{walked}}).
pub async fn run_forked(
&mut self,
agent: &Agent,
memory_keys: &[String],
walked: &[String],
) -> Result<String, String> {
// Resolve prompt templates with current state
let resolved_steps: Vec<AutoStep> = self.steps.iter().map(|s| AutoStep {
prompt: resolve_prompt(&s.prompt, memory_keys, &self.walked),
prompt: resolve_prompt(&s.prompt, memory_keys, walked),
phase: s.phase.clone(),
}).collect();
let orig_steps = std::mem::replace(&mut self.steps, resolved_steps);
@ -205,7 +202,6 @@ impl AutoAgent {
let fork_point = forked.context.entries.len();
let mut backend = Backend::Forked(forked);
let result = self.run_with_backend(&mut backend, None).await;
// Capture entries added during this run
if let Backend::Forked(ref agent) = backend {
self.last_run_entries = agent.context.entries[fork_point..].to_vec();
}