IT BUILDS: Full AST migration compiles — zero errors

All callers migrated from old context types to AstNode/ContextState.
Killed: Message, Role (api), ConversationEntry, ContextEntry,
ContextSection, working_stack, api/parsing.rs, api/types.rs,
api/openai.rs, context_old.rs.

Oneshot standalone path stubbed (needs completions API rewrite).
12 warnings remaining (dead code cleanup).

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-08 15:29:52 -04:00
parent d0d876e067
commit e587431f9a
5 changed files with 99 additions and 224 deletions

View file

@ -15,8 +15,7 @@
// hasn't internalized. 2 API calls.
use crate::agent::api::ApiClient;
use crate::agent::api::*;
use crate::agent::context::{ConversationEntry, ContextEntry, ContextState};
use crate::agent::context::{AstNode, Ast, NodeBody, ContextState, Role};
const SCORE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(120);
@ -30,39 +29,71 @@ enum Filter<'a> {
SkipAllMemories,
}
/// True when `node` is a leaf whose body is a stored memory.
fn is_memory(node: &AstNode) -> bool {
    match node {
        AstNode::Leaf(leaf) => matches!(leaf.body(), NodeBody::Memory { .. }),
        _ => false,
    }
}
/// The key of a memory node, or `None` when `node` is not a memory leaf.
fn memory_key(node: &AstNode) -> Option<&str> {
    if let AstNode::Leaf(leaf) = node {
        if let NodeBody::Memory { key, .. } = leaf.body() {
            return Some(key);
        }
    }
    None
}
/// True when `node` is a conversation branch authored by the assistant.
fn is_assistant(node: &AstNode) -> bool {
    match node {
        AstNode::Branch { role: Role::Assistant, .. } => true,
        _ => false,
    }
}
/// Push an AstNode as one or more JSON messages for the scoring API.
///
/// A branch becomes a single message with its role and the concatenated
/// rendering of its children; a leaf becomes a "tool" message for tool
/// results and a "user" message otherwise.
fn push_api_message(node: &AstNode, msgs: &mut Vec<serde_json::Value>) {
    let message = match node {
        AstNode::Branch { role, children } => serde_json::json!({
            "role": role.as_str(),
            "content": children.iter().map(|c| c.render()).collect::<String>(),
        }),
        AstNode::Leaf(leaf) => {
            // Tool results must be tagged with the "tool" role so the
            // scoring API attributes them correctly; everything else is
            // presented as user content.
            let role = if matches!(leaf.body(), NodeBody::ToolResult(_)) {
                "tool"
            } else {
                "user"
            };
            serde_json::json!({"role": role, "content": leaf.body().text()})
        }
    };
    msgs.push(message);
}
/// Build the messages array for a scoring call.
///
/// Always includes system prompt + context message as prefix, then
/// entries from `range` filtered by `filter`.
/// Always includes system prompt as prefix, then entries from `range`
/// filtered by `filter`.
fn build_messages(
context: &ContextState,
range: std::ops::Range<usize>,
filter: Filter,
) -> Vec<serde_json::Value> {
let mut msgs = Vec::new();
for e in context.system().entries() {
msgs.push(serde_json::json!({"role": "system", "content": e.entry.message().content_text()}));
for node in context.system() {
push_api_message(node, &mut msgs);
}
let ctx = context.render_context_message();
if !ctx.is_empty() {
msgs.push(serde_json::json!({"role": "user", "content": ctx}));
}
let entries = context.conversation().entries();
let entries = context.conversation();
for i in range {
let ce = &entries[i];
let entry = &ce.entry;
let node = &entries[i];
let skip = match &filter {
Filter::None => false,
Filter::SkipIndex(idx) => i == *idx,
Filter::SkipKey(key) => matches!(entry, ConversationEntry::Memory { key: k, .. } if k == *key),
Filter::SkipAllMemories => entry.is_memory(),
Filter::SkipKey(key) => memory_key(node) == Some(*key),
Filter::SkipAllMemories => is_memory(node),
};
if skip { continue; }
let m = entry.api_message();
msgs.push(serde_json::json!({
"role": m.role_str(),
"content": m.content_text(),
}));
push_api_message(node, &mut msgs);
}
msgs
}
@ -178,16 +209,13 @@ pub async fn score_memories(
context: &ContextState,
client: &ApiClient,
) -> anyhow::Result<MemoryScore> {
let mut memory_keys: Vec<String> = context.conversation.entries().iter()
.filter_map(|ce| match &ce.entry {
ConversationEntry::Memory { key, .. } => Some(key.clone()),
_ => None,
})
let mut memory_keys: Vec<String> = context.conversation().iter()
.filter_map(|node| memory_key(node).map(String::from))
.collect();
memory_keys.dedup();
let response_indices: Vec<usize> = context.conversation.entries().iter().enumerate()
.filter(|(_, ce)| ce.entry.message().role == Role::Assistant)
let response_indices: Vec<usize> = context.conversation().iter().enumerate()
.filter(|(_, node)| is_assistant(node))
.map(|(i, _)| i)
.collect();
@ -201,7 +229,7 @@ pub async fn score_memories(
let http = http_client();
let range = 0..context.conversation.entries().len();
let range = 0..context.conversation().len();
let baseline = call_score(&http, client, &build_messages(context, range.clone(), Filter::None)).await?;
@ -245,10 +273,10 @@ pub async fn score_memories(
/// Find the entry index after `start` that contains the Nth assistant response.
/// Returns (end_index, true) if N responses were found, (entries.len(), false) if not.
fn nth_response_end(entries: &[ContextEntry], start: usize, n: usize) -> (usize, bool) {
fn nth_response_end(entries: &[AstNode], start: usize, n: usize) -> (usize, bool) {
let mut count = 0;
for i in start..entries.len() {
if entries[i].entry.message().role == Role::Assistant {
if is_assistant(&entries[i]) {
count += 1;
if count >= n { return (i + 1, true); }
}
@ -270,17 +298,15 @@ pub async fn score_memory(
) -> anyhow::Result<f64> {
const RESPONSE_WINDOW: usize = 50;
let entries = context.conversation.entries();
let first_pos = match entries.iter().position(|ce| {
matches!(&ce.entry, ConversationEntry::Memory { key: k, .. } if k == key)
}) {
let entries = context.conversation();
let first_pos = match entries.iter().position(|node| memory_key(node) == Some(key)) {
Some(p) => p,
None => return Ok(0.0),
};
let (end, _) = nth_response_end(entries, first_pos, RESPONSE_WINDOW);
let range = first_pos..end;
if !entries[range.clone()].iter().any(|ce| ce.entry.message().role == Role::Assistant) {
if !entries[range.clone()].iter().any(|node| is_assistant(node)) {
return Ok(0.0);
}
@ -319,14 +345,14 @@ where
let store = crate::hippocampus::store::Store::load().unwrap_or_default();
for (i, ce) in context.conversation.entries().iter().enumerate() {
if let ConversationEntry::Memory { key, .. } = &ce.entry {
if !seen.insert(key.clone()) { continue; }
let last_scored = store.nodes.get(key.as_str())
for (i, node) in context.conversation().iter().enumerate() {
if let Some(key) = memory_key(node) {
if !seen.insert(key.to_owned()) { continue; }
let last_scored = store.nodes.get(key)
.map(|n| n.last_scored)
.unwrap_or(0);
if now - last_scored >= max_age_secs {
candidates.push((i, key.clone(), last_scored));
candidates.push((i, key.to_owned(), last_scored));
}
}
}
@ -337,11 +363,11 @@ where
let http = http_client();
let mut scored = 0;
let total_tokens = context.conversation.tokens();
let entries = context.conversation();
let total_tokens: usize = entries.iter().map(|n| n.tokens()).sum();
let token_cutoff = total_tokens * 60 / 100;
// Precompute cumulative token position for each entry
let entries = context.conversation.entries();
let mut cumulative: Vec<usize> = Vec::with_capacity(entries.len());
let mut running = 0;
for e in entries {
@ -355,9 +381,9 @@ where
if cumulative.get(*pos).copied().unwrap_or(total_tokens) > token_cutoff {
continue;
}
let (end, _) = nth_response_end(context.conversation.entries(), *pos, response_window);
let (end, _) = nth_response_end(context.conversation(), *pos, response_window);
let range = *pos..end;
if !context.conversation.entries()[range.clone()].iter().any(|ce| ce.entry.message().role == Role::Assistant) {
if !context.conversation()[range.clone()].iter().any(|node| is_assistant(node)) {
continue;
}
@ -397,10 +423,11 @@ pub async fn score_finetune(
count: usize,
client: &ApiClient,
) -> anyhow::Result<Vec<(usize, f64)>> {
let range = context.conversation.entries().len().saturating_sub(count)..context.conversation.entries().len();
let entries = context.conversation();
let range = entries.len().saturating_sub(count)..entries.len();
let response_positions: Vec<usize> = range.clone()
.filter(|&i| context.conversation.entries()[i].entry.message().role == Role::Assistant)
.filter(|&i| is_assistant(&entries[i]))
.collect();
if response_positions.is_empty() {
return Ok(Vec::new());