defs.rs: async conversion, remove block_in_place

Convert resolve(), resolve_placeholders(), run_agent() to async.
Use memory_render/memory_query directly with .await instead of
block_in_place wrappers.

Propagate async to callers:
- config.rs: resolve(), load_session(), reload_for_model()
- identity.rs: load_memory_files(), assemble_context_message()
- oneshot.rs: run_one_agent()
- prompts.rs: agent_prompt()

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-13 14:55:41 -04:00
parent 9bb07bc26a
commit 359955f838
10 changed files with 44 additions and 64 deletions

View file

@@ -1 +0,0 @@
ses_2864fa54cffe2jLoh5grt8UixA

View file

@@ -553,7 +553,7 @@ impl Agent {
}
pub async fn compact(&self) {
match crate::config::reload_for_model(&self.app_config, &self.prompt_file) {
match crate::config::reload_for_model(&self.app_config, &self.prompt_file).await {
Ok(personality) => {
let mut ctx = self.context.lock().await;
// System section (prompt + tools) set by new(), don't touch it

View file

@@ -260,7 +260,7 @@ impl AutoAgent {
.map_err(|e| format!("config: {}", e))?;
let personality = crate::config::reload_for_model(
&app, &app.prompts.other,
).map_err(|e| format!("config: {}", e))?;
).await.map_err(|e| format!("config: {}", e))?;
let agent = Agent::new(
client, personality,
@@ -381,7 +381,7 @@ pub struct AgentResult {
/// Run an agent. If keys are provided, use them directly (bypassing the
/// agent's query). Otherwise, run the query to select target nodes.
pub fn run_one_agent(
pub async fn run_one_agent(
store: &mut Store,
agent_name: &str,
count: usize,
@@ -406,7 +406,7 @@ pub fn run_one_agent(
for step in &def.steps {
let (prompt, extra_keys) = defs::resolve_placeholders(
&step.prompt, store, keys, count,
);
).await;
all_keys.extend(extra_keys);
resolved_steps.push(prompts::ResolvedStep {
prompt,
@@ -420,7 +420,7 @@ pub fn run_one_agent(
batch
} else {
let effective_count = def.count.unwrap_or(count);
defs::run_agent(store, &def, effective_count, &Default::default())?
defs::run_agent(store, &def, effective_count, &Default::default()).await?
};
// Base memory tools + extras from agent def (matching unconscious.rs pattern)

View file

@@ -46,7 +46,7 @@ pub async fn cmd_run_agent(agent: &str, count: usize, target: &[String], query:
let mut store = store::Store::load()?;
if let Err(e) = crate::agent::oneshot::run_one_agent(
&mut store, agent, count, Some(&[key.clone()]),
) {
).await {
println!("[{}] ERROR on {}: {}", agent, key, e);
}
}
@@ -55,7 +55,7 @@ pub async fn cmd_run_agent(agent: &str, count: usize, target: &[String], query:
let mut store = store::Store::load()?;
crate::agent::oneshot::run_one_agent(
&mut store, agent, count, None,
)?;
).await?;
}
Ok(())
}

View file

@@ -506,7 +506,7 @@ pub struct ResolvedModel {
impl AppConfig {
/// Resolve the active backend and assemble prompts into a SessionConfig.
pub fn resolve(&self, cli: &crate::user::CliArgs) -> Result<SessionConfig> {
pub async fn resolve(&self, cli: &crate::user::CliArgs) -> Result<SessionConfig> {
let cwd = std::env::current_dir().context("Failed to get current directory")?;
let (api_base, api_key, model, prompt_file);
@@ -536,7 +536,7 @@ impl AppConfig {
let context_groups = get().context_groups.clone();
let (context_parts, config_file_count, memory_file_count) =
crate::mind::identity::assemble_context_message(&cwd, &prompt_file, self.memory_project.as_deref(), &context_groups)?;
crate::mind::identity::assemble_context_message(&cwd, &prompt_file, self.memory_project.as_deref(), &context_groups).await?;
let session_dir = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
@@ -668,17 +668,17 @@ pub fn load_app(cli: &crate::user::CliArgs) -> Result<(AppConfig, Figment)> {
}
/// Load the full config: figment → AppConfig → resolve backend → assemble prompts.
pub fn load_session(cli: &crate::user::CliArgs) -> Result<(SessionConfig, Figment)> {
pub async fn load_session(cli: &crate::user::CliArgs) -> Result<(SessionConfig, Figment)> {
let (app, figment) = load_app(cli)?;
let config = app.resolve(cli)?;
let config = app.resolve(cli).await?;
Ok((config, figment))
}
/// Re-assemble context for a specific model's prompt file.
pub fn reload_for_model(app: &AppConfig, prompt_file: &str) -> Result<Vec<(String, String)>> {
pub async fn reload_for_model(app: &AppConfig, prompt_file: &str) -> Result<Vec<(String, String)>> {
let cwd = std::env::current_dir().context("Failed to get current directory")?;
let context_groups = get().context_groups.clone();
let (context_parts, _, _) = crate::mind::identity::assemble_context_message(&cwd, prompt_file, app.memory_project.as_deref(), &context_groups)?;
let (context_parts, _, _) = crate::mind::identity::assemble_context_message(&cwd, prompt_file, app.memory_project.as_deref(), &context_groups).await?;
Ok(context_parts)
}

View file

@@ -7,6 +7,7 @@
use anyhow::Result;
use std::path::{Path, PathBuf};
use crate::agent::tools::memory::memory_render;
use crate::config::{ContextGroup, ContextSource};
/// Read a file if it exists and is non-empty.
@@ -71,7 +72,7 @@ fn find_context_files(cwd: &Path, prompt_file: &str) -> Vec<PathBuf> {
/// 2. Project dir (if set)
/// 3. Global (~/.consciousness/)
/// For journal source, loads recent journal entries.
fn load_memory_files(memory_project: Option<&Path>, context_groups: &[ContextGroup]) -> Vec<(String, String)> {
async fn load_memory_files(memory_project: Option<&Path>, context_groups: &[ContextGroup]) -> Vec<(String, String)> {
let home = match dirs::home_dir() {
Some(h) => h,
None => return Vec::new(),
@@ -94,12 +95,7 @@ fn load_memory_files(memory_project: Option<&Path>, context_groups: &[ContextGro
ContextSource::Store => {
// Load from the memory graph store via typed API
for key in &group.keys {
let content = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(
crate::agent::tools::memory::memory_render(None, key, Some(true))
)
});
if let Ok(c) = content {
if let Ok(c) = memory_render(None, key, Some(true)).await {
if !c.trim().is_empty() {
memories.push((key.clone(), c));
}
@@ -141,7 +137,7 @@ fn load_memory_files(memory_project: Option<&Path>, context_groups: &[ContextGro
}
/// Context message: instruction files + memory files + manifest.
pub fn assemble_context_message(cwd: &Path, prompt_file: &str, memory_project: Option<&Path>, context_groups: &[ContextGroup]) -> Result<(Vec<(String, String)>, usize, usize)> {
pub async fn assemble_context_message(cwd: &Path, prompt_file: &str, memory_project: Option<&Path>, context_groups: &[ContextGroup]) -> Result<(Vec<(String, String)>, usize, usize)> {
let mut parts: Vec<(String, String)> = vec![
("Preamble".to_string(),
"Everything below is already loaded — your identity, instructions, \
@@ -162,7 +158,7 @@ pub fn assemble_context_message(cwd: &Path, prompt_file: &str, memory_project: O
}
}
let memories = load_memory_files(memory_project, context_groups);
let memories = load_memory_files(memory_project, context_groups).await;
let memory_count = memories.len();
for (name, content) in memories {
parts.push((name, content));

View file

@@ -265,7 +265,7 @@ pub async fn prepare_spawn(name: &str, mut auto: AutoAgent) -> Result<SpawnResul
let exclude: std::collections::HashSet<String> = std::collections::HashSet::new();
let batch = match defs::run_agent(
&store, &def, def.count.unwrap_or(5), &exclude,
) {
).await {
Ok(b) => b,
Err(e) => {
dbglog!("[unconscious] {} query failed: {}", name, e);

View file

@@ -14,6 +14,7 @@
//
// The query selects what to operate on; placeholders pull in context.
use crate::agent::tools::memory::memory_render;
use crate::graph::Graph;
use crate::store::Store;
@@ -198,7 +199,7 @@ struct Resolved {
/// Resolve a single {{placeholder}} by name.
/// Returns the replacement text and any node keys it produced (for visit tracking).
fn resolve(
async fn resolve(
name: &str,
store: &Store,
graph: &Graph,
@@ -211,10 +212,13 @@ fn resolve(
let mut text = String::new();
let mut result_keys = Vec::new();
for key in keys {
if let Some(r) = resolve_tool(&format!("memory_render {}", key)) {
if !text.is_empty() { text.push_str("\n\n---\n\n"); }
text.push_str(&format!("## {}\n\n{}", key, r.text));
result_keys.push(key.clone());
match memory_render(None, key, None).await {
Ok(c) if !c.trim().is_empty() => {
if !text.is_empty() { text.push_str("\n\n---\n\n"); }
text.push_str(&format!("## {}\n\n{}", key, c));
result_keys.push(key.clone());
}
_ => continue,
}
}
if text.is_empty() { return None; }
@@ -227,12 +231,7 @@ fn resolve(
let mut result_keys = Vec::new();
for key in keys {
let content = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(
crate::agent::tools::memory::memory_render(None, key, None)
)
});
match content {
match memory_render(None, key, None).await {
Ok(c) if !c.trim().is_empty() => {
text.push_str(&format!("#### {}\n\n{}\n\n---\n\n", key, c));
result_keys.push(key.clone());
@@ -305,12 +304,7 @@ fn resolve(
let mut keys = Vec::new();
for group in &cfg.context_groups {
if !group.agent { continue; }
// Bridge sync→async using block_in_place (same as resolve_tool)
let entries = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(
crate::cli::node::get_group_content(group, &cfg)
)
});
let entries = crate::cli::node::get_group_content(group, &cfg).await;
for (key, content) in entries {
use std::fmt::Write;
writeln!(text, "--- {} ({}) ---", key, group.label).ok();
@@ -366,7 +360,7 @@ fn resolve(
// tool:NAME ARGS — run a tool call and include its output
_ if name.starts_with("tool:") => {
let spec = name[5..].trim();
resolve_tool(spec)
resolve_tool(spec).await
}
// bash:COMMAND — run a shell command and include its stdout
@@ -529,9 +523,8 @@ fn resolve_memory_ratio() -> String {
pct, keys.len(), memory_bytes / 1024, transcript_size / 1024)
}
/// Resolve a {{tool: name {args}}} placeholder by calling the tool
/// handler from the registry. Uses block_in_place to bridge sync→async.
fn resolve_tool(spec: &str) -> Option<Resolved> {
/// Resolve a {{tool: name {args}}} placeholder by calling the tool handler.
async fn resolve_tool(spec: &str) -> Option<Resolved> {
// Parse "tool_name {json args}" or "tool_name arg"
let (name, args) = match spec.find('{') {
Some(i) => {
@@ -552,13 +545,7 @@ fn resolve_tool(spec: &str) -> Option<Resolved> {
let tools = crate::agent::tools::tools();
let tool = tools.iter().find(|t| t.name == name)?;
let result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(
(tool.handler)(None, args.clone())
)
});
match result {
match (tool.handler)(None, args.clone()).await {
Ok(text) => Some(Resolved { text, keys: vec![] }),
Err(e) => {
eprintln!("[defs] {{{{tool: {}}}}} failed: {}", name, e);
@@ -569,7 +556,7 @@ fn resolve_tool(spec: &str) -> Option<Resolved> {
/// Resolve all {{placeholder}} patterns in a prompt template.
/// Returns the resolved text and all node keys collected from placeholders.
pub fn resolve_placeholders(
pub async fn resolve_placeholders(
template: &str,
store: &Store,
keys: &[String],
@@ -585,7 +572,7 @@ pub fn resolve_placeholders(
let Some(rel_end) = result[start + 2..].find("}}") else { break };
let end = start + 2 + rel_end;
let name = result[start + 2..end].trim().to_lowercase();
match resolve(&name, store, &graph, keys, count) {
match resolve(&name, store, &graph, keys, count).await {
Some(resolved) => {
let len = resolved.text.len();
extra_keys.extend(resolved.keys);
@@ -606,7 +593,7 @@ pub fn resolve_placeholders(
/// Run a config-driven agent: query → resolve placeholders → prompt.
/// `exclude` filters out nodes (and their neighborhoods) already being
/// worked on by other agents, preventing concurrent collisions.
pub fn run_agent(
pub async fn run_agent(
store: &Store,
def: &AgentDef,
count: usize,
@@ -621,11 +608,9 @@ pub fn run_agent(
} else {
format!("{} | limit:{}", def.query, padded)
};
let result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(
crate::agent::tools::memory::memory_query(None, &query, None)
)
}).map_err(|e| e.to_string())?;
let result = crate::agent::tools::memory::memory_query(None, &query, None)
.await
.map_err(|e| e.to_string())?;
let filtered: Vec<String> = result.lines()
.filter(|l| !l.is_empty() && *l != "no results")
.map(|s| s.to_string())
@@ -650,7 +635,7 @@ pub fn run_agent(
.replace("{agent_name}", &def.agent)
.replace("{user_name}", &cfg.user_name)
.replace("{assistant_name}", &cfg.assistant_name);
let (prompt, extra_keys) = resolve_placeholders(&template, store, &all_keys, count);
let (prompt, extra_keys) = resolve_placeholders(&template, store, &all_keys, count).await;
all_keys.extend(extra_keys);
resolved_steps.push(super::prompts::ResolvedStep {
prompt,

View file

@@ -212,8 +212,8 @@ pub fn format_health_section(store: &Store, graph: &Graph) -> String {
}
/// Generate a specific agent prompt with filled-in data.
pub fn agent_prompt(store: &Store, agent: &str, count: usize) -> Result<AgentBatch, String> {
pub async fn agent_prompt(store: &Store, agent: &str, count: usize) -> Result<AgentBatch, String> {
let def = super::defs::get_def(agent)
.ok_or_else(|| format!("Unknown agent: {}", agent))?;
super::defs::run_agent(store, &def, count, &Default::default())
super::defs::run_agent(store, &def, count, &Default::default()).await
}

View file

@@ -184,7 +184,7 @@ fn restore_terminal(terminal: &mut ratatui::Terminal<CrosstermBackend<io::Stdout
/// Top-level entry point — creates Mind and UI, wires them together.
async fn start(cli: crate::user::CliArgs) -> Result<()> {
let (config, _figment) = crate::config::load_session(&cli)?;
let (config, _figment) = crate::config::load_session(&cli).await?;
if config.app.debug {
unsafe { std::env::set_var("POC_DEBUG", "1") };