split out src/mind
This commit is contained in:
parent
ce04568454
commit
79e384f005
21 changed files with 1865 additions and 2175 deletions
|
|
@ -473,9 +473,9 @@ pub fn build_response_message(
|
|||
}
|
||||
|
||||
// Check for leaked tool calls in content text.
|
||||
let leaked = crate::user::parsing::parse_leaked_tool_calls(&content);
|
||||
let leaked = crate::agent::parsing::parse_leaked_tool_calls(&content);
|
||||
if !leaked.is_empty() {
|
||||
let cleaned = crate::user::parsing::strip_leaked_artifacts(&content);
|
||||
let cleaned = crate::agent::parsing::strip_leaked_artifacts(&content);
|
||||
return Message {
|
||||
role: Role::Assistant,
|
||||
content: if cleaned.trim().is_empty() { None }
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
pub mod api;
|
||||
pub mod context;
|
||||
pub mod parsing;
|
||||
pub mod tools;
|
||||
pub mod training;
|
||||
|
||||
|
|
@ -79,13 +80,10 @@ pub struct Agent {
|
|||
app_config: crate::config::AppConfig,
|
||||
pub prompt_file: String,
|
||||
/// Stable session ID for memory-search dedup across turns.
|
||||
session_id: String,
|
||||
pub session_id: String,
|
||||
/// Agent orchestration state (surface-observe, journal, reflect).
|
||||
/// TODO: move to Session — it's session-level, not agent-level.
|
||||
pub agent_cycles: crate::subconscious::subconscious::AgentCycleState,
|
||||
/// Latest memory importance scores from training scorer.
|
||||
pub memory_scores: Option<crate::agent::training::MemoryScore>,
|
||||
/// Whether a /score task is currently running.
|
||||
pub scoring_in_flight: bool,
|
||||
/// Shared active tools — Agent writes, TUI reads.
|
||||
pub active_tools: crate::user::ui_channel::SharedActiveTools,
|
||||
}
|
||||
|
|
@ -137,8 +135,6 @@ impl Agent {
|
|||
prompt_file,
|
||||
session_id,
|
||||
agent_cycles,
|
||||
memory_scores: None,
|
||||
scoring_in_flight: false,
|
||||
active_tools,
|
||||
};
|
||||
|
||||
|
|
@ -323,7 +319,7 @@ impl Agent {
|
|||
// Check for closing tag — parse and fire immediately
|
||||
if let Some(end) = tool_call_buf.find("</tool_call>") {
|
||||
let body = &tool_call_buf[..end];
|
||||
if let Some(call) = crate::user::parsing::parse_tool_call_body(body) {
|
||||
if let Some(call) = crate::agent::parsing::parse_tool_call_body(body) {
|
||||
let args: serde_json::Value =
|
||||
serde_json::from_str(&call.function.arguments).unwrap_or_default();
|
||||
let args_summary = summarize_args(&call.function.name, &args);
|
||||
|
|
@ -666,7 +662,7 @@ impl Agent {
|
|||
}
|
||||
|
||||
/// Build context state summary for the debug screen.
|
||||
pub fn context_state_summary(&self) -> Vec<ContextSection> {
|
||||
pub fn context_state_summary(&self, memory_scores: Option<&crate::agent::training::MemoryScore>) -> Vec<ContextSection> {
|
||||
let count = |s: &str| self.tokenizer.encode_with_special_tokens(s).len();
|
||||
|
||||
let mut sections = Vec::new();
|
||||
|
|
@ -758,7 +754,7 @@ impl Agent {
|
|||
_ => unreachable!(),
|
||||
};
|
||||
let text = entry.message().content_text();
|
||||
let score = self.memory_scores.as_ref()
|
||||
let score = memory_scores
|
||||
.and_then(|s| s.memory_weights.iter()
|
||||
.find(|(k, _)| k == key)
|
||||
.map(|(_, v)| *v));
|
||||
|
|
@ -823,7 +819,7 @@ impl Agent {
|
|||
};
|
||||
// Show which memories were important for this response
|
||||
let children = if m.role == Role::Assistant {
|
||||
self.memory_scores.as_ref()
|
||||
memory_scores
|
||||
.map(|s| s.important_memories_for_entry(i))
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
|
|
@ -965,7 +961,11 @@ impl Agent {
|
|||
|
||||
/// Push the current context summary to the shared state for the TUI to read.
|
||||
pub fn publish_context_state(&self) {
|
||||
let summary = self.context_state_summary();
|
||||
self.publish_context_state_with_scores(None);
|
||||
}
|
||||
|
||||
pub fn publish_context_state_with_scores(&self, memory_scores: Option<&crate::agent::training::MemoryScore>) {
|
||||
let summary = self.context_state_summary(memory_scores);
|
||||
if let Ok(mut dbg) = std::fs::OpenOptions::new().create(true).append(true)
|
||||
.open("/tmp/poc-journal-debug.log") {
|
||||
use std::io::Write;
|
||||
|
|
|
|||
210
src/agent/parsing.rs
Normal file
210
src/agent/parsing.rs
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
// parsing.rs — Tool call parsing for leaked/streamed XML
|
||||
//
|
||||
// When models stream tool calls as XML text (Qwen-style <tool_call>
|
||||
// blocks) rather than structured tool_calls, this module extracts
|
||||
// them from the response text.
|
||||
//
|
||||
// Handles two wire formats:
|
||||
// - Qwen XML: <function=name><parameter=key>value</parameter></function>
|
||||
// - JSON: {"name": "...", "arguments": {...}}
|
||||
//
|
||||
// Also handles streaming artifacts: whitespace inside XML tags from
|
||||
// token boundaries, </think> tags, etc.
|
||||
|
||||
use crate::agent::api::types::*;
|
||||
use crate::agent::tools::{ToolCall, ToolDef, FunctionCall};
|
||||
|
||||
/// Parse leaked tool calls from response text.
|
||||
/// Looks for `<tool_call>...</tool_call>` blocks and tries both
|
||||
/// XML and JSON formats for the body.
|
||||
/// Parse a single tool call body (content between `<tool_call>` and `</tool_call>`).
|
||||
pub fn parse_tool_call_body(body: &str) -> Option<ToolCall> {
|
||||
let normalized = normalize_xml_tags(body);
|
||||
let body = normalized.trim();
|
||||
let mut counter = 0u32;
|
||||
parse_xml_tool_call(body, &mut counter)
|
||||
.or_else(|| parse_json_tool_call(body, &mut counter))
|
||||
}
|
||||
|
||||
/// Parse leaked tool calls from response text.
///
/// Looks for `<tool_call>...</tool_call>` blocks and tries both
/// XML and JSON formats for each body. Unparseable bodies are
/// silently skipped; an unterminated opening tag stops the scan.
pub fn parse_leaked_tool_calls(text: &str) -> Vec<ToolCall> {
    // Normalize whitespace inside XML tags: "<\nfunction\n=\nbash\n>" → "<function=bash>"
    // This handles streaming tokenizers that split tags across tokens.
    let normalized = normalize_xml_tags(text);
    let text = &normalized;

    let mut calls = Vec::new();
    let mut search_from = 0;
    let mut call_counter: u32 = 0;

    while let Some(start) = text[search_from..].find("<tool_call>") {
        let abs_start = search_from + start;
        let after_tag = abs_start + "<tool_call>".len();

        // No matching close tag means the block is truncated — stop scanning.
        let end = match text[after_tag..].find("</tool_call>") {
            Some(pos) => after_tag + pos,
            None => break,
        };

        let body = text[after_tag..end].trim();
        search_from = end + "</tool_call>".len();

        // Try XML format first, then JSON
        if let Some(call) = parse_xml_tool_call(body, &mut call_counter) {
            calls.push(call);
        } else if let Some(call) = parse_json_tool_call(body, &mut call_counter) {
            calls.push(call);
        }
    }

    calls
}
|
||||
|
||||
/// Normalize whitespace inside XML-like tags for streaming tokenizers.
/// Collapses whitespace between `<` and `>` so that `<\nfunction\n=\nbash\n>`
/// becomes `<function=bash>`, and `</\nparameter\n>` becomes `</parameter>`.
/// Leaves content between tags untouched.
fn normalize_xml_tags(text: &str) -> String {
    let mut out = String::with_capacity(text.len());
    // Single-state scanner: inside a tag (`<` seen, `>` not yet) whitespace
    // is dropped; everywhere else every character passes through unchanged.
    let mut in_tag = false;
    for c in text.chars() {
        match (in_tag, c) {
            (false, '<') => {
                in_tag = true;
                out.push('<');
            }
            (true, '>') => {
                in_tag = false;
                out.push('>');
            }
            // Whitespace inside a tag is a streaming artifact — skip it.
            (true, ws) if ws.is_whitespace() => {}
            (_, other) => out.push(other),
        }
    }
    out
}
|
||||
|
||||
/// Parse a Qwen-style `<tag=value>body</tag>` pseudo-XML element.
/// Returns `(value, body, rest)` on success, with value and body trimmed
/// and `rest` being everything after the closing tag.
fn parse_qwen_tag<'a>(s: &'a str, tag: &str) -> Option<(&'a str, &'a str, &'a str)> {
    let opener = format!("<{}=", tag);
    let closer = format!("</{}>", tag);

    // Everything after the first "<tag=" …
    let after_open = &s[s.find(&opener)? + opener.len()..];
    // … splits at the first '>' into the attribute value and the remainder.
    let (value, after_gt) = after_open.split_once('>')?;
    // The body runs up to the matching close tag; the tail goes back to the caller.
    let (body, rest) = after_gt.split_once(closer.as_str())?;

    Some((value.trim(), body.trim(), rest))
}
|
||||
|
||||
/// Parse Qwen's XML tool call format.
|
||||
fn parse_xml_tool_call(body: &str, counter: &mut u32) -> Option<ToolCall> {
|
||||
let (func_name, func_body, _) = parse_qwen_tag(body, "function")?;
|
||||
let func_name = func_name.to_string();
|
||||
|
||||
let mut args = serde_json::Map::new();
|
||||
let mut rest = func_body;
|
||||
while let Some((key, val, remainder)) = parse_qwen_tag(rest, "parameter") {
|
||||
args.insert(key.to_string(), serde_json::Value::String(val.to_string()));
|
||||
rest = remainder;
|
||||
}
|
||||
|
||||
*counter += 1;
|
||||
Some(ToolCall {
|
||||
id: format!("leaked_{}", counter),
|
||||
call_type: "function".to_string(),
|
||||
function: FunctionCall {
|
||||
name: func_name,
|
||||
arguments: serde_json::to_string(&args).unwrap_or_default(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse JSON tool call format (some models emit this).
|
||||
fn parse_json_tool_call(body: &str, counter: &mut u32) -> Option<ToolCall> {
|
||||
let v: serde_json::Value = serde_json::from_str(body).ok()?;
|
||||
let name = v["name"].as_str()?;
|
||||
let arguments = &v["arguments"];
|
||||
|
||||
*counter += 1;
|
||||
Some(ToolCall {
|
||||
id: format!("leaked_{}", counter),
|
||||
call_type: "function".to_string(),
|
||||
function: FunctionCall {
|
||||
name: name.to_string(),
|
||||
arguments: serde_json::to_string(arguments).unwrap_or_default(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Strip tool call XML and thinking tokens from text so the conversation
|
||||
/// history stays clean. Removes `<tool_call>...</tool_call>` blocks and
|
||||
/// `</think>` tags (thinking content before them is kept — it's useful context).
|
||||
pub fn strip_leaked_artifacts(text: &str) -> String {
|
||||
let normalized = normalize_xml_tags(text);
|
||||
let mut result = normalized.clone();
|
||||
|
||||
// Remove <tool_call>...</tool_call> blocks
|
||||
while let Some(start) = result.find("<tool_call>") {
|
||||
if let Some(end_pos) = result[start..].find("</tool_call>") {
|
||||
let end = start + end_pos + "</tool_call>".len();
|
||||
result = format!("{}{}", &result[..start], &result[end..]);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove </think> tags (but keep the thinking text before them)
|
||||
result = result.replace("</think>", "");
|
||||
|
||||
result.trim().to_string()
|
||||
}
|
||||
|
||||
// Unit tests for leaked tool-call parsing and tag normalization.
#[cfg(test)]
mod tests {
    use super::*;

    // End-to-end: thinking text plus a clean Qwen XML block yields one call
    // with its parameter extracted into the JSON arguments.
    #[test]
    fn test_leaked_tool_call_clean() {
        let text = "thinking\n</think>\n<tool_call>\n<function=bash>\n<parameter=command>poc-memory used core-personality</parameter>\n</function>\n</tool_call>";
        let calls = parse_leaked_tool_calls(text);
        assert_eq!(calls.len(), 1);
        assert_eq!(calls[0].function.name, "bash");
        let args: serde_json::Value = serde_json::from_str(&calls[0].function.arguments).unwrap();
        assert_eq!(args["command"], "poc-memory used core-personality");
    }

    // Tags broken up by newlines (one token per fragment) must still parse.
    #[test]
    fn test_leaked_tool_call_streamed_whitespace() {
        // Streaming tokenizer splits XML tags across tokens with newlines
        let text = "<tool_call>\n<\nfunction\n=\nbash\n>\n<\nparameter\n=\ncommand\n>pwd</\nparameter\n>\n</\nfunction\n>\n</tool_call>";
        let calls = parse_leaked_tool_calls(text);
        assert_eq!(calls.len(), 1, "should parse streamed format");
        assert_eq!(calls[0].function.name, "bash");
        let args: serde_json::Value = serde_json::from_str(&calls[0].function.arguments).unwrap();
        assert_eq!(args["command"], "pwd");
    }

    // Normalization must not touch whitespace that sits between tags.
    #[test]
    fn test_normalize_preserves_content() {
        let text = "<function=bash>\n<parameter=command>echo hello world</parameter>\n</function>";
        let normalized = normalize_xml_tags(text);
        // Newlines between tags are not inside tags, so preserved
        assert_eq!(normalized, "<function=bash>\n<parameter=command>echo hello world</parameter>\n</function>");
    }

    // Whitespace inside a single tag collapses to nothing.
    #[test]
    fn test_normalize_strips_tag_internal_whitespace() {
        let text = "<\nfunction\n=\nbash\n>";
        let normalized = normalize_xml_tags(text);
        assert_eq!(normalized, "<function=bash>");
    }
}
|
||||
|
|
@ -288,6 +288,79 @@ pub async fn score_memory(
|
|||
Ok(divs.iter().sum())
|
||||
}
|
||||
|
||||
// ── Background memory scoring ───────────────────────────────────
|
||||
|
||||
/// Incrementally score memories through the conversation.
///
/// Walks memory entries in conversation order starting from `cursor`.
/// For each memory with a full WINDOW after it, calls `score_divergence()`
/// and yields the result. Stops at the first memory that doesn't have
/// enough messages yet — the conversation needs to grow before we can
/// score it.
///
/// Returns the updated cursor (entry index to resume from next time)
/// and the scores for each memory that was scored this round.
pub async fn score_memories_incremental(
    context: &ContextState,
    cursor: usize,
    client: &ApiClient,
    ui_tx: &UiSender,
) -> anyhow::Result<(usize, Vec<(String, f64)>)> {
    // Number of conversation entries required after a memory before scoring it.
    const WINDOW: usize = 50;

    // Collect unique memory keys with their first position, starting from cursor
    let mut seen = std::collections::HashSet::new();
    let mut to_score: Vec<(usize, String)> = Vec::new();

    for (i, entry) in context.entries.iter().enumerate().skip(cursor) {
        if let ConversationEntry::Memory { key, .. } = entry {
            // Only the first occurrence of each key is scored this round.
            if seen.insert(key.clone()) {
                to_score.push((i, key.clone()));
            }
        }
    }

    let http = http_client();
    let mut new_cursor = cursor;
    let mut results = Vec::new();

    for (pos, key) in &to_score {
        let end = pos + WINDOW;

        // Not enough conversation after this memory yet — stop here
        if end > context.entries.len() {
            break;
        }

        // Need at least one assistant response in the window
        let range = *pos..end;
        if !context.entries[range.clone()].iter().any(|e| e.message().role == Role::Assistant) {
            // NOTE(review): jumping the cursor to `end` also skips any other
            // memories whose positions fall inside this window — confirm
            // that is intended and not a scoring gap.
            new_cursor = end;
            continue;
        }

        let _ = ui_tx.send(UiMessage::Activity(format!("scoring memory: {}...", key)));
        // Importance = summed divergence over the window when this key is
        // withheld from context (Filter::SkipKey).
        match score_divergence(&http, client, context, range, Filter::SkipKey(key)).await {
            Ok((divs, _)) => {
                let importance: f64 = divs.iter().sum();
                let _ = ui_tx.send(UiMessage::Debug(format!(
                    "[scoring] {} → {:.2}", key, importance,
                )));
                results.push((key.clone(), importance));
            }
            Err(e) => {
                // Failures are reported to the debug channel but do not abort
                // the round; the cursor still advances past this memory.
                let _ = ui_tx.send(UiMessage::Debug(format!(
                    "[scoring] {} FAILED: {:#}", key, e,
                )));
            }
        }
        new_cursor = end;
    }

    // Empty activity string clears the UI's activity indicator.
    let _ = ui_tx.send(UiMessage::Activity(String::new()));
    Ok((new_cursor, results))
}
|
||||
|
||||
// ── Fine-tuning scoring ─────────────────────────────────────────
|
||||
|
||||
/// Score which recent responses are candidates for fine-tuning.
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue