From e17c46edc14a1e051f859ff4af8481df66db62e6 Mon Sep 17 00:00:00 2001
From: spqrz
Date: Sun, 12 Apr 2026 11:12:12 +0100
Subject: [PATCH 01/94] use html2md on web_fetch (fixes #3)

---
 Cargo.toml             | 1 +
 src/agent/tools/web.rs | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 2c5246f..9186f64 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,6 +20,7 @@ edition.workspace = true
 
 [dependencies]
 anyhow = "1"
+html2md = "0.2"
 crossterm = { version = "0.29", features = ["event-stream", "bracketed-paste", "osc52"] }
 clap = { version = "4", features = ["derive"] }
 figment = { version = "0.10", features = ["env"] }
diff --git a/src/agent/tools/web.rs b/src/agent/tools/web.rs
index 7ad7fc9..15d011e 100644
--- a/src/agent/tools/web.rs
+++ b/src/agent/tools/web.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
 
 use anyhow::{Context, Result};
 use serde::Deserialize;
+use html2md::parse_html;
 
 pub fn tools() -> [super::Tool; 2] {
     [
@@ -42,7 +43,9 @@ async fn web_fetch(args: &serde_json::Value) -> Result {
     let body = response.text().await
         .with_context(|| format!("failed to read body from {}", a.url))?;
 
-    Ok(super::truncate_output(body, 30000))
+    // Convert HTML to Markdown, then truncate
+    let markdown = parse_html(&body);
+    Ok(super::truncate_output(markdown, 30000))
 }
 
 // ── Search ──────────────────────────────────────────────────────

From 7046e63b9d4088a0d04cf6a59f51033bd40d9126 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Wed, 15 Apr 2026 05:59:58 -0400
Subject: [PATCH 02/94] Include identity nodes in memory scoring

Identity memory nodes now participate in importance scoring alongside
conversation memories. Score loading/saving handles both sections, and
the conscious screen uses node.label() consistently for memory display.

Co-Authored-By: Proof of Concept
---
 src/mind/mod.rs           | 65 +++++++++++++++++++++++++++------------
 src/subconscious/learn.rs | 21 ++++++++++---
 src/user/context.rs       | 11 +++----
 3 files changed, 66 insertions(+), 31 deletions(-)

diff --git a/src/mind/mod.rs b/src/mind/mod.rs
index 9fcc101..a221e80 100644
--- a/src/mind/mod.rs
+++ b/src/mind/mod.rs
@@ -33,6 +33,36 @@
 pub use unconscious::{UnconsciousSnapshot, Unconscious};
 
 use crate::agent::context::{AstNode, NodeBody, Section, Ast, ContextState};
 
+fn match_scores(
+    nodes: &[AstNode],
+    scores: &std::collections::BTreeMap<String, f64>,
+) -> Vec<(usize, f64)> {
+    nodes.iter().enumerate()
+        .filter_map(|(i, node)| {
+            if let AstNode::Leaf(leaf) = node {
+                if let NodeBody::Memory { key, .. } = leaf.body() {
+                    return scores.get(key.as_str()).map(|&s| (i, s));
+                }
+            }
+            None
+        }).collect()
+}
+
+fn find_memory_by_key(ctx: &ContextState, key: &str) -> Option<(Section, usize)> {
+    [(Section::Identity, ctx.identity()), (Section::Conversation, ctx.conversation())]
+        .into_iter()
+        .find_map(|(section, nodes)| {
+            nodes.iter().enumerate().find_map(|(i, node)| {
+                if let AstNode::Leaf(leaf) = node {
+                    if let NodeBody::Memory { key: k, .. } = leaf.body() {
+                        if k == key { return Some((section, i)); }
+                    }
+                }
+                None
+            })
+        })
+}
+
 fn load_memory_scores(ctx: &mut ContextState, path: &std::path::Path) {
     let data = match std::fs::read_to_string(path) {
         Ok(d) => d,
         Err(_) => return,
     };
     let scores: std::collections::BTreeMap<String, f64> = match serde_json::from_str(&data) {
         Ok(s) => s,
         Err(_) => return,
     };
-    let mut applied = 0;
-    for i in 0..ctx.conversation().len() {
-        if let AstNode::Leaf(leaf) = &ctx.conversation()[i] {
-            if let NodeBody::Memory { key, .. } = leaf.body() {
-                if let Some(&s) = scores.get(key.as_str()) {
-                    ctx.set_score(Section::Conversation, i, Some(s));
-                    applied += 1;
-                }
-            }
-        }
-    }
+    let identity_scores = match_scores(ctx.identity(), &scores);
+    let conv_scores = match_scores(ctx.conversation(), &scores);
+    let applied = identity_scores.len() + conv_scores.len();
+    for (i, s) in identity_scores {
+        ctx.set_score(Section::Identity, i, Some(s));
+    }
+    for (i, s) in conv_scores {
+        ctx.set_score(Section::Conversation, i, Some(s));
+    }
     if applied > 0 {
         dbglog!("[scoring] loaded {} scores from {}", applied, path.display());
     }
 }
 
-/// Collect scored memory keys from conversation entries.
+/// Collect scored memory keys from identity and conversation entries.
 fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTreeMap<String, f64> {
-    ctx.conversation().iter()
+    ctx.identity().iter()
+        .chain(ctx.conversation().iter())
         .filter_map(|node| {
             if let AstNode::Leaf(leaf) = node {
                 if let NodeBody::Memory { key, score: Some(s), .. } = leaf.body() {
@@ -531,14 +560,10 @@ impl Mind {
         async move {
             let scores_snapshot = {
                 let mut ctx = agent.context.lock().await;
-                for i in 0..ctx.conversation().len() {
-                    if let AstNode::Leaf(leaf) = &ctx.conversation()[i] {
-                        if let NodeBody::Memory { key: k, .. } = leaf.body() {
-                            if *k == key {
-                                ctx.set_score(Section::Conversation, i, Some(score));
-                            }
-                        }
-                    }
-                }
+                // Find memory by key in identity or conversation
+                let found = find_memory_by_key(&ctx, &key);
+                if let Some((section, i)) = found {
+                    ctx.set_score(section, i, Some(score));
+                }
                 let snapshot = collect_memory_scores(&ctx);
                 drop(ctx);
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index ec63df9..f9e5ab5 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -62,8 +62,16 @@ fn build_token_ids(
     for node in context.system() {
         ids.extend(node.token_ids());
     }
+    // Identity nodes can be filtered by key for scoring
     for node in context.identity() {
-        ids.extend(node.token_ids());
+        let skip = match &filter {
+            Filter::SkipKey(key) => memory_key(node) == Some(*key),
+            Filter::SkipAllMemories => is_memory(node),
+            _ => false,
+        };
+        if !skip {
+            ids.extend(node.token_ids());
+        }
     }
     for node in context.journal() {
         ids.extend(node.token_ids());
     }
@@ -175,7 +183,9 @@ pub async fn score_memories(
     // Collect memory keys and response indices under a brief lock
     let (memory_keys, response_indices) = {
         let ctx = agent.context.lock().await;
-        let mut keys: Vec<String> = ctx.conversation().iter()
+        // Include identity nodes and conversation memories
+        let mut keys: Vec<String> = ctx.identity().iter()
+            .chain(ctx.conversation().iter())
             .filter_map(|node| memory_key(node).map(String::from))
             .collect();
         keys.dedup();
@@ -331,7 +341,10 @@ where
 {
     let store = &*store_arc;
 
-    for (i, node) in context.conversation().iter().enumerate() {
+    // Identity nodes always score at position 0; conversation nodes at their index
+    let identity_nodes = context.identity().iter().map(|n| (0, n));
+    let conv_nodes = context.conversation().iter().enumerate();
+    for (pos, node) in identity_nodes.chain(conv_nodes) {
         if let Some(key) = memory_key(node) {
             if !seen.insert(key.to_owned()) { continue; }
             let last_scored = store.get_node(key)
                 .map(|n| n.last_scored)
                 .unwrap_or(0);
             if now - last_scored >= max_age_secs {
-                candidates.push((i, key.to_owned(), last_scored));
+                candidates.push((pos, key.to_owned(), last_scored));
             }
         }
     }
diff --git a/src/user/context.rs b/src/user/context.rs
index a0692fa..6418f4c 100644
--- a/src/user/context.rs
+++ b/src/user/context.rs
@@ -37,17 +37,14 @@ impl ConsciousScreen {
         let mut unscored = 0usize;
         for node in ctx.conversation() {
             if let AstNode::Leaf(leaf) = node {
-                if let NodeBody::Memory { key, score, text } = leaf.body() {
-                    let status = match score {
-                        Some(s) => { scored += 1; format!("{:.2}", s) }
-                        None => { unscored += 1; String::new() }
-                    };
+                if let NodeBody::Memory { score, text, .. } = leaf.body() {
+                    if score.is_some() { scored += 1; } else { unscored += 1; }
                     mem_children.push(SectionView {
-                        name: key.clone(),
+                        name: node.label(),
                         tokens: node.tokens(),
                         content: text.clone(),
                         children: Vec::new(),
-                        status,
+                        status: String::new(),
                     });
                 }
             }

From 460394750641cc6a6b6d696062a5b787720b3292 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Wed, 15 Apr 2026 06:08:27 -0400
Subject: [PATCH 03/94] Display memory scores in status column

Move score display from name (via label()) to status column for
cleaner layout. Score now appears right of tokens for all memory
nodes.

Co-Authored-By: Proof of Concept
---
 src/user/context.rs |  6 +++---
 src/user/widgets.rs | 25 +++++++++++++++++--------
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/user/context.rs b/src/user/context.rs
index 6418f4c..4cfa78d 100644
--- a/src/user/context.rs
+++ b/src/user/context.rs
@@ -37,14 +37,14 @@ impl ConsciousScreen {
         let mut unscored = 0usize;
         for node in ctx.conversation() {
             if let AstNode::Leaf(leaf) = node {
-                if let NodeBody::Memory { score, text, .. } = leaf.body() {
+                if let NodeBody::Memory { key, score, text } = leaf.body() {
                     if score.is_some() { scored += 1; } else { unscored += 1; }
                     mem_children.push(SectionView {
-                        name: node.label(),
+                        name: format!("mem: {}", key),
                         tokens: node.tokens(),
                         content: text.clone(),
                         children: Vec::new(),
-                        status: String::new(),
+                        status: score.map(|s| format!("{:.2}", s)).unwrap_or_default(),
                     });
                 }
             }
diff --git a/src/user/widgets.rs b/src/user/widgets.rs
index 82a0f05..6b2a11d 100644
--- a/src/user/widgets.rs
+++ b/src/user/widgets.rs
@@ -6,7 +6,7 @@ use ratatui::{
     widgets::{Block, Borders},
     crossterm::event::KeyCode,
 };
-use crate::agent::context::{AstNode, Ast};
+use crate::agent::context::{AstNode, Ast, NodeBody};
 
 #[derive(Debug, Clone)]
 pub struct SectionView {
@@ -20,13 +20,22 @@ pub struct SectionView {
 
 fn node_to_view(node: &AstNode) -> SectionView {
     match node {
-        AstNode::Leaf(leaf) => SectionView {
-            name: node.label(),
-            tokens: node.tokens(),
-            content: leaf.body().text().to_string(),
-            children: Vec::new(),
-            status: String::new(),
-        },
+        AstNode::Leaf(leaf) => {
+            let (name, status) = match leaf.body() {
+                NodeBody::Memory { key, score, .. } => {
+                    let s = score.map(|v| format!("{:.2}", v)).unwrap_or_default();
+                    (format!("mem: {}", key), s)
+                }
+                _ => (node.label(), String::new()),
+            };
+            SectionView {
+                name,
+                tokens: node.tokens(),
+                content: leaf.body().text().to_string(),
+                children: Vec::new(),
+                status,
+            }
+        }
         AstNode::Branch { children, .. } => {
            let child_views: Vec<SectionView> = children.iter()
                .map(|c| node_to_view(c))

From 81e0632cf36b9400a08d496f4198584c3256ed14 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Wed, 15 Apr 2026 21:52:20 -0400
Subject: [PATCH 04/94] DMN: wire dream hours reminder into Foraging state

The hours_since_last_dream() function existed but wasn't called after
refactoring moved the DMN prompts from hooks to Rust. Now shows
"You haven't dreamed in X hours" when >= 18h since last dream session.
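
For example, with the idle hint elided and an illustrative hour count
(any value >= 18 triggers the nudge), the Foraging prompt now reads:

    [dmn] Foraging time. Follow whatever catches your attention —
    memory files, code, ideas. Call yield_to_user when you want to
    rest. You haven't dreamed in 21 hours — consider running
    ~/.consciousness/tools/dream-start.sh.
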
Co-Authored-By: Proof of Concept --- src/mind/subconscious.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/mind/subconscious.rs b/src/mind/subconscious.rs index d5bee34..15c8b04 100644 --- a/src/mind/subconscious.rs +++ b/src/mind/subconscious.rs @@ -20,6 +20,7 @@ use std::path::PathBuf; use std::time::{Duration, Instant}; +use crate::thalamus::idle::{hours_since_last_dream, DREAM_INTERVAL_HOURS}; /// DMN state machine. #[derive(Debug, Clone)] @@ -138,10 +139,22 @@ impl State { ) } State::Foraging => { + let dream_hint = { + let hours = hours_since_last_dream(); + if hours >= DREAM_INTERVAL_HOURS { + format!( + " You haven't dreamed in {} hours — consider running \ + ~/.consciousness/tools/dream-start.sh.", + hours + ) + } else { + String::new() + } + }; format!( "[dmn] Foraging time. {} Follow whatever catches your attention — \ - memory files, code, ideas. Call yield_to_user when you want to rest.{}", - idle_info, stuck_warning + memory files, code, ideas. Call yield_to_user when you want to rest.{}{}", + idle_info, dream_hint, stuck_warning ) } State::Resting { since } => { From b649a11645fd5a7b66e0b0207e61d0c552363b8a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 15 Apr 2026 21:58:03 -0400 Subject: [PATCH 05/94] hours_since_last_dream: return 0 if dream in progress The function was reading from dream-log.jsonl which only updates when dreams complete. If a dream session was started but not yet ended, it would show stale hours. Now checks for active dream state first. Co-Authored-By: Proof of Concept --- src/thalamus/idle.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/thalamus/idle.rs b/src/thalamus/idle.rs index 6c78b19..71baa81 100644 --- a/src/thalamus/idle.rs +++ b/src/thalamus/idle.rs @@ -372,6 +372,10 @@ impl State { } pub fn hours_since_last_dream() -> u64 { + // If a dream is currently in progress, no nudge needed + if home().join(".consciousness/state/dream-state").exists() { + return 0; + } let path = home().join(".consciousness/logs/dream-log.jsonl"); let content = match fs::read_to_string(path) { Ok(c) if !c.is_empty() => c, From a73bcf5ae3713f0e79abccf3d8224d355d85450c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 15 Apr 2026 23:16:53 -0400 Subject: [PATCH 06/94] training: restructure as vLLM plugin package MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Convert to installable package with entry points for vLLM auto-discovery - Add checkpoint_sync.py: Python replacement for Rust checkpoint binary - Block-level diffing of safetensors files (4KB blocks) - vLLM→HF weight name conversion built-in - Scheduled 10min after training jobs (batched) - API change: /train now takes raw token IDs (context_ids + continuation_ids) - No tokenizer on training side, client owns tokenization - Remove superseded code: standalone scripts, Rust binary, tokenizer helpers Install: pip install -e ./training Then vLLM auto-loads via entry point. 
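
Example /train request body (token IDs are illustrative, and the exact
envelope is whatever the worker's handler parses; the per-sample fields
context_ids/continuation_ids and the learning_rate override are the
ones worker.py reads):

    {
      "samples": [
        {
          "context_ids": [151644, 872, 198],
          "continuation_ids": [1654, 1265, 13]
        }
      ],
      "config": { "learning_rate": 1e-5 }
    }
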
Co-Authored-By: Proof of Concept --- training/apollo_plugin/__init__.py | 17 + training/apollo_plugin/checkpoint_sync.py | 500 ++++++++++++++++++ .../export_hook.py} | 17 +- .../optimizer.py} | 0 .../steering.py} | 0 .../{ => apollo_plugin}/weight_mapping.py | 0 .../worker.py} | 72 ++- training/checkpoint/Cargo.toml | 12 - training/checkpoint/src/main.rs | 265 ---------- training/export_weights.py | 87 --- training/first_training_step.py | 215 -------- training/pyproject.toml | 28 + training/start_vllm_with_apollo.sh | 18 - training/train.py | 269 ---------- training/training_example.py | 175 ------ 15 files changed, 607 insertions(+), 1068 deletions(-) create mode 100644 training/apollo_plugin/__init__.py create mode 100644 training/apollo_plugin/checkpoint_sync.py rename training/{vllm_export_hook.py => apollo_plugin/export_hook.py} (82%) rename training/{apollo_mini.py => apollo_plugin/optimizer.py} (100%) rename training/{extract_steering_vector.py => apollo_plugin/steering.py} (100%) rename training/{ => apollo_plugin}/weight_mapping.py (100%) rename training/{apollo_worker.py => apollo_plugin/worker.py} (87%) delete mode 100644 training/checkpoint/Cargo.toml delete mode 100644 training/checkpoint/src/main.rs delete mode 100644 training/export_weights.py delete mode 100644 training/first_training_step.py create mode 100644 training/pyproject.toml delete mode 100755 training/start_vllm_with_apollo.sh delete mode 100644 training/train.py delete mode 100644 training/training_example.py diff --git a/training/apollo_plugin/__init__.py b/training/apollo_plugin/__init__.py new file mode 100644 index 0000000..bfbecd0 --- /dev/null +++ b/training/apollo_plugin/__init__.py @@ -0,0 +1,17 @@ +"""Apollo training plugin for vLLM. + +Enables continuous fine-tuning alongside live inference by: +1. Exporting CUDA IPC handles for weight sharing +2. Providing a training worker daemon (/train endpoint) +3. Block-level checkpoint sync to safetensors files + +Install: pip install -e /path/to/training +Then vLLM auto-loads via entry point. +""" + +from .export_hook import _patch_model_runner + + +def register(): + """Called by vLLM's plugin loader on startup.""" + _patch_model_runner() diff --git a/training/apollo_plugin/checkpoint_sync.py b/training/apollo_plugin/checkpoint_sync.py new file mode 100644 index 0000000..eff93cc --- /dev/null +++ b/training/apollo_plugin/checkpoint_sync.py @@ -0,0 +1,500 @@ +"""Sync live GPU weights to safetensors files on disk. + +Reads vLLM weight tensors via CUDA IPC handles, converts from vLLM's +merged layout to HuggingFace's separate layout, diffs block-by-block +against on-disk safetensors files, and writes only changed blocks. + +For small behavioral training steps, this turns a 54GB checkpoint +write into a few hundred MB of actual disk I/O. 
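+
+For example (illustrative numbers): a 54 GB checkpoint spans about
+13 million 4 KB blocks, so a small step that perturbs 0.5% of those
+blocks rewrites roughly 270 MB rather than the full 54 GB.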
+ +Usage: + # Sync live weights to disk + python checkpoint_sync.py sync --model-dir /path/to/Qwen3.5-27B + + # Debug name mapping issues + python checkpoint_sync.py diagnose --model-dir /path/to/Qwen3.5-27B + + # From Python: + from checkpoint_sync import checkpoint_sync + result = checkpoint_sync("/path/to/model") +""" + +import json +import mmap +import struct +import sys +from pathlib import Path +from typing import Dict, List, Tuple, Any +import logging + +import torch + +logger = logging.getLogger(__name__) + +DEFAULT_BLOCK_SIZE = 4096 # 4KB blocks — matches filesystem block size +DEFAULT_HANDLES_PATH = "/tmp/vllm_weight_handles.pt" + + +# --------------------------------------------------------------------------- +# vLLM → HuggingFace weight name/shape conversion +# --------------------------------------------------------------------------- +# Qwen3.5-27B dimensions (could be read from config.json for generality) + +HIDDEN = 5120 +NUM_K_HEADS = 16 +NUM_V_HEADS = 48 +HEAD_K_DIM = 128 +HEAD_V_DIM = 128 +KEY_DIM = NUM_K_HEADS * HEAD_K_DIM # 2048 +VALUE_DIM = NUM_V_HEADS * HEAD_V_DIM # 6144 +INTERMEDIATE = 17408 + +# Full attention (some layers use standard attention, not GDN) +NUM_ATTN_HEADS = 24 +NUM_ATTN_KV_HEADS = 4 +ATTN_HEAD_DIM = 256 +ATTN_Q_HEAD_DIM = ATTN_HEAD_DIM * 2 # 512 +ATTN_Q_DIM = NUM_ATTN_HEADS * ATTN_Q_HEAD_DIM # 12288 +ATTN_K_DIM = NUM_ATTN_KV_HEADS * ATTN_HEAD_DIM # 1024 +ATTN_V_DIM = NUM_ATTN_KV_HEADS * ATTN_HEAD_DIM # 1024 + + +def vllm_to_hf_tensors(vllm_params: Dict[str, torch.Tensor] + ) -> Dict[str, torch.Tensor]: + """Convert vLLM merged weights to HF-compatible separate tensors. + + vLLM merges certain projections for efficiency: + - qkv_proj (full attn) → q_proj, k_proj, v_proj + - in_proj_qkvz (GDN) → in_proj_qkv, in_proj_z + - in_proj_ba (GDN) → in_proj_b, in_proj_a + - gate_up_proj (MLP) → gate_proj, up_proj + + Returns views that share GPU memory with the original tensors. + """ + hf_params = {} + + for name, tensor in vllm_params.items(): + # Strip vLLM's 'language_model.' 
prefix to match HF naming
+        hf_name = name.removeprefix('language_model.')
+
+        if 'in_proj_qkvz' in name:
+            # GDN layer: [key*2 + value*2, hidden] → qkv + z
+            prefix = hf_name.replace('in_proj_qkvz.weight', '')
+            split_at = KEY_DIM * 2 + VALUE_DIM
+            hf_params[prefix + 'in_proj_qkv.weight'] = tensor[:split_at]
+            hf_params[prefix + 'in_proj_z.weight'] = tensor[split_at:]
+
+        elif 'in_proj_ba' in name:
+            # GDN layer: [num_v_heads*2, hidden] → b + a
+            prefix = hf_name.replace('in_proj_ba.weight', '')
+            hf_params[prefix + 'in_proj_b.weight'] = tensor[:NUM_V_HEADS]
+            hf_params[prefix + 'in_proj_a.weight'] = tensor[NUM_V_HEADS:]
+
+        elif 'qkv_proj' in name:
+            # Full attention: [q + k + v, hidden] → separate
+            prefix = hf_name.replace('qkv_proj.weight', '')
+            hf_params[prefix + 'q_proj.weight'] = tensor[:ATTN_Q_DIM]
+            hf_params[prefix + 'k_proj.weight'] = tensor[ATTN_Q_DIM:ATTN_Q_DIM + ATTN_K_DIM]
+            hf_params[prefix + 'v_proj.weight'] = tensor[ATTN_Q_DIM + ATTN_K_DIM:]
+
+        elif 'gate_up_proj' in name:
+            # MLP: [intermediate*2, hidden] → gate + up
+            prefix = hf_name.replace('gate_up_proj.weight', '')
+            hf_params[prefix + 'gate_proj.weight'] = tensor[:INTERMEDIATE]
+            hf_params[prefix + 'up_proj.weight'] = tensor[INTERMEDIATE:]
+
+        else:
+            # Pass through unchanged
+            hf_params[hf_name] = tensor
+
+    return hf_params
+
+
+# ---------------------------------------------------------------------------
+# Safetensors file handling
+# ---------------------------------------------------------------------------
+
+def read_safetensors_index(model_dir: Path) -> Dict[str, str]:
+    """Map tensor names to safetensors filenames.
+
+    For sharded models, reads model.safetensors.index.json.
+    For single-file models, returns empty dict (default to model.safetensors).
+    """
+    index_path = model_dir / "model.safetensors.index.json"
+    if not index_path.exists():
+        return {}
+
+    with open(index_path) as f:
+        index = json.load(f)
+
+    return dict(index.get("weight_map", {}))
+
+
+def parse_safetensors_header(data: memoryview) -> Tuple[int, dict]:
+    """Parse safetensors file header.
+
+    Returns (data_start_offset, header_dict).
+    Header dict maps tensor names to metadata including 'data_offsets'.
+    """
+    header_size = struct.unpack('<Q', data[:8])[0]
+    header = json.loads(bytes(data[8:8 + header_size]))
+    return 8 + header_size, header
+
+
+def sync_tensor_to_mmap(
+    mm: mmap.mmap,
+    name: str,
+    tensor: torch.Tensor,
+    data_start: int,
+    offsets: List[int],
+    block_size: int,
+) -> Tuple[int, int]:
+    """Sync a single tensor to mmap'd file using block-level diffing.
+
+    Returns (bytes_compared, bytes_changed).
+ """ + start = data_start + offsets[0] + end = data_start + offsets[1] + disk_len = end - start + + # Transfer tensor to CPU and get raw bytes + # Use .detach() to avoid autograd overhead, .contiguous() for memory layout + try: + live_bytes = tensor.detach().contiguous().cpu().numpy().tobytes() + except Exception as e: + logger.warning(f"Failed to transfer {name} to CPU: {e}") + return 0, 0 + + if len(live_bytes) != disk_len: + logger.warning( + f"Size mismatch for {name}: disk={disk_len}, live={len(live_bytes)} " + f"(shape={list(tensor.shape)}, dtype={tensor.dtype})" + ) + return 0, 0 + + # Block-level diff: compare and write only changed blocks + compared = 0 + changed = 0 + offset = 0 + + while offset < disk_len: + block_end = min(offset + block_size, disk_len) + block_len = block_end - offset + + disk_block = mm[start + offset:start + block_end] + live_block = live_bytes[offset:block_end] + + compared += block_len + + if disk_block != live_block: + mm[start + offset:start + block_end] = live_block + changed += block_len + + offset = block_end + + return compared, changed + + +def sync_file( + file_path: Path, + tensors: Dict[str, torch.Tensor], + block_size: int, +) -> Tuple[int, int, int, int]: + """Sync tensors to a single safetensors file. + + Returns (bytes_compared, bytes_changed, tensors_found, tensors_missing). + """ + with open(file_path, 'r+b') as f: + mm = mmap.mmap(f.fileno(), 0) + + try: + data_start, header = parse_safetensors_header(memoryview(mm)) + + total_compared = 0 + total_changed = 0 + found = 0 + missing = 0 + + for name, tensor in tensors.items(): + if name == "__metadata__": + continue + + if name not in header: + missing += 1 + continue + + found += 1 + meta = header[name] + offsets = meta['data_offsets'] + + compared, changed = sync_tensor_to_mmap( + mm, name, tensor, data_start, offsets, block_size + ) + total_compared += compared + total_changed += changed + + # Flush changes to disk + if total_changed > 0: + mm.flush() + + return total_compared, total_changed, found, missing + + finally: + mm.close() + + +# --------------------------------------------------------------------------- +# Main entry point +# --------------------------------------------------------------------------- + +def load_vllm_weights(handles_path: str) -> Dict[str, torch.Tensor]: + """Load vLLM weight tensors from CUDA IPC handles. + + The handles file is written by vllm_export_hook.py on vLLM startup. + Each handle can be used to reconstruct a tensor pointing to vLLM's + GPU memory — no copy, direct access. + """ + handles = torch.load(handles_path, weights_only=False) + + weights = {} + for name, info in handles.items(): + func, args = info['handle'] + try: + weights[name] = func(*args) + except Exception as e: + logger.warning(f"Failed to reconstruct {name}: {e}") + + return weights + + +def checkpoint_sync( + model_dir: str, + handles_path: str = DEFAULT_HANDLES_PATH, + block_size: int = DEFAULT_BLOCK_SIZE, +) -> Dict[str, Any]: + """Sync live GPU weights to model safetensors files. + + This is the main entry point. Call this after training steps + or periodically to checkpoint weights without full serialization. 
+ + Args: + model_dir: Directory containing safetensors files + handles_path: Path to vLLM weight IPC handles file + block_size: Block size for diffing (default 4KB) + + Returns: + Dict with sync statistics: + - total_compared: bytes compared + - total_changed: bytes actually written + - files_changed: list of modified filenames + - tensors_synced: number of tensors processed + - tensors_missing: tensors not found in safetensors + """ + model_dir = Path(model_dir) + + if not Path(handles_path).exists(): + raise FileNotFoundError( + f"Weight handles not found: {handles_path}. " + "Is vLLM running with the export hook?" + ) + + # Step 1: Load live weights from GPU via IPC + logger.info("Loading live weights from GPU...") + vllm_weights = load_vllm_weights(handles_path) + logger.info(f" Loaded {len(vllm_weights)} vLLM tensors") + + # Step 2: Convert to HF naming/layout + hf_weights = vllm_to_hf_tensors(vllm_weights) + logger.info(f" Converted to {len(hf_weights)} HF tensors") + + # Step 3: Map tensors to safetensors files + weight_map = read_safetensors_index(model_dir) + + by_file: Dict[str, Dict[str, torch.Tensor]] = {} + unmapped = [] + + for name, tensor in hf_weights.items(): + filename = weight_map.get(name) + if filename is None: + # Single-file model or missing from index + if (model_dir / "model.safetensors").exists(): + filename = "model.safetensors" + else: + unmapped.append(name) + continue + by_file.setdefault(filename, {})[name] = tensor + + if unmapped: + logger.warning(f" {len(unmapped)} tensors not in index: {unmapped[:3]}...") + + # Step 4: Sync each file + total_compared = 0 + total_changed = 0 + total_found = 0 + total_missing = 0 + files_changed = [] + + for filename in sorted(by_file.keys()): + tensors = by_file[filename] + file_path = model_dir / filename + + if not file_path.exists(): + logger.warning(f" File not found: {filename}") + total_missing += len(tensors) + continue + + compared, changed, found, missing = sync_file(file_path, tensors, block_size) + + total_compared += compared + total_changed += changed + total_found += found + total_missing += missing + + if changed > 0: + files_changed.append(filename) + logger.info(f" {filename}: {changed / 1e6:.2f} MB changed ({found} tensors)") + + # Summary + if total_changed == 0: + logger.info("No changes - model files are up to date") + else: + pct = (total_changed / total_compared * 100) if total_compared > 0 else 0 + logger.info( + f"Synced: {total_changed / 1e6:.2f} MB changed / " + f"{total_compared / 1e9:.2f} GB compared ({pct:.3f}%)" + ) + + if total_missing > 0: + logger.warning(f" {total_missing} tensors not found in safetensors files") + + return { + "total_compared": total_compared, + "total_changed": total_changed, + "files_changed": files_changed, + "tensors_synced": total_found, + "tensors_missing": total_missing, + } + + +# --------------------------------------------------------------------------- +# Diagnostics +# --------------------------------------------------------------------------- + +def diagnose(model_dir: str, handles_path: str = DEFAULT_HANDLES_PATH): + """Print diagnostic info about weight name mappings. + + Useful for debugging mismatches between vLLM and safetensors names. 
+ """ + model_dir = Path(model_dir) + + # Load and convert vLLM weights + vllm_weights = load_vllm_weights(handles_path) + hf_weights = vllm_to_hf_tensors(vllm_weights) + hf_names = set(hf_weights.keys()) + + # Read safetensors index + weight_map = read_safetensors_index(model_dir) + disk_names = set(weight_map.keys()) + + # If single-file model, parse that file's header + if not disk_names: + st_path = model_dir / "model.safetensors" + if st_path.exists(): + with open(st_path, 'rb') as f: + mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + _, header = parse_safetensors_header(memoryview(mm)) + disk_names = {k for k in header.keys() if k != "__metadata__"} + mm.close() + + print(f"vLLM tensors (raw): {len(vllm_weights)}") + print(f"HF tensors (converted): {len(hf_names)}") + print(f"Disk tensors: {len(disk_names)}") + print() + + in_both = hf_names & disk_names + only_hf = hf_names - disk_names + only_disk = disk_names - hf_names + + print(f"Matched: {len(in_both)}") + print(f"Only in HF (won't sync): {len(only_hf)}") + print(f"Only on disk (not updated): {len(only_disk)}") + + if only_hf: + print(f"\nSample HF-only: {sorted(only_hf)[:5]}") + if only_disk: + print(f"\nSample disk-only: {sorted(only_disk)[:5]}") + + +# --------------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------------- + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Sync live GPU weights to safetensors files" + ) + subparsers = parser.add_subparsers(dest="command", help="Command") + + # sync command + sync_parser = subparsers.add_parser("sync", help="Sync weights to disk") + sync_parser.add_argument( + "--model-dir", required=True, + help="Directory containing safetensors files" + ) + sync_parser.add_argument( + "--handles", default=DEFAULT_HANDLES_PATH, + help=f"Path to IPC handles (default: {DEFAULT_HANDLES_PATH})" + ) + sync_parser.add_argument( + "--block-size", type=int, default=DEFAULT_BLOCK_SIZE, + help=f"Block size for diffing (default: {DEFAULT_BLOCK_SIZE})" + ) + sync_parser.add_argument( + "-v", "--verbose", action="store_true", + help="Verbose output" + ) + + # diagnose command + diag_parser = subparsers.add_parser("diagnose", help="Check name mappings") + diag_parser.add_argument( + "--model-dir", required=True, + help="Directory containing safetensors files" + ) + diag_parser.add_argument( + "--handles", default=DEFAULT_HANDLES_PATH, + help=f"Path to IPC handles (default: {DEFAULT_HANDLES_PATH})" + ) + + args = parser.parse_args() + + if args.command is None: + parser.print_help() + sys.exit(1) + + logging.basicConfig( + level=logging.DEBUG if getattr(args, 'verbose', False) else logging.INFO, + format='%(message)s' + ) + + try: + if args.command == "sync": + result = checkpoint_sync(args.model_dir, args.handles, args.block_size) + print(json.dumps(result, indent=2)) + elif args.command == "diagnose": + diagnose(args.model_dir, args.handles) + except FileNotFoundError as e: + logger.error(str(e)) + sys.exit(1) + except Exception as e: + logger.exception(f"Failed: {e}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/training/vllm_export_hook.py b/training/apollo_plugin/export_hook.py similarity index 82% rename from training/vllm_export_hook.py rename to training/apollo_plugin/export_hook.py index 6a0bf1e..4853930 100644 --- a/training/vllm_export_hook.py +++ b/training/apollo_plugin/export_hook.py @@ -1,17 +1,12 @@ """Monkey-patch vLLM to export weight 
IPC handles on startup.
 
-Usage — add to start_vllm.sh BEFORE the vllm serve command:
+Usage — install the apollo_plugin package:
 
-    export VLLM_PLUGINS=vllm_export_hook
-    vllm serve Qwen/Qwen3.5-27B ...
+    pip install -e /path/to/training
 
-Or use Python to launch vLLM with the hook:
+Then vLLM auto-discovers and loads via entry point. Or filter:
 
-    python3 -c "
-    import vllm_export_hook  # installs the patch
-    from vllm.entrypoints.openai.api_server import run_server
-    run_server(...)
-    "
+    VLLM_PLUGINS=apollo vllm serve Qwen/Qwen3.5-27B ...
 
 The hook patches vLLM's model runner to export IPC handles
 after model loading completes. The handles are saved to a file that the
@@ -70,7 +65,3 @@ def _patch_model_runner():
     gpu_worker.Worker.load_model = patched_load
 
     print("[apollo] Weight export hook installed")
-
-
-# Auto-install when imported
-_patch_model_runner()
diff --git a/training/apollo_mini.py b/training/apollo_plugin/optimizer.py
similarity index 100%
rename from training/apollo_mini.py
rename to training/apollo_plugin/optimizer.py
diff --git a/training/extract_steering_vector.py b/training/apollo_plugin/steering.py
similarity index 100%
rename from training/extract_steering_vector.py
rename to training/apollo_plugin/steering.py
diff --git a/training/weight_mapping.py b/training/apollo_plugin/weight_mapping.py
similarity index 100%
rename from training/weight_mapping.py
rename to training/apollo_plugin/weight_mapping.py
diff --git a/training/apollo_worker.py b/training/apollo_plugin/worker.py
similarity index 87%
rename from training/apollo_worker.py
rename to training/apollo_plugin/worker.py
index d46fb55..5d9ba29 100755
--- a/training/apollo_worker.py
+++ b/training/apollo_plugin/worker.py
@@ -74,6 +74,9 @@ class TrainingJob:
             'error': self.error,
         }
 
+CHECKPOINT_DELAY_SECS = 10 * 60  # 10 minutes
+
+
 class ApolloWorker:
     def __init__(self, config_path: str = "/home/kent/poc/consciousness/training/config.json"):
         self.config = self._load_config(config_path)
@@ -81,6 +84,7 @@ class ApolloWorker:
         self.vllm_paused = False
         self.app = web.Application()
         self._setup_routes()
+        self._checkpoint_timer: Optional[asyncio.Task] = None
 
     def _load_config(self, config_path: str) -> Dict[str, Any]:
         """Load configuration from file or use defaults."""
@@ -230,8 +234,11 @@ class ApolloWorker:
             # Mark job as completed
             job.status = TrainingStatus.COMPLETED
             job.completed_at = datetime.now()
-
+
             logger.info(f"Training job {job.job_id} completed successfully")
+
+            # Schedule checkpoint sync (batched — won't duplicate if timer pending)
+            self.schedule_checkpoint_sync()
 
         except Exception as e:
             logger.error(f"Training job {job.job_id} failed: {e}")
@@ -278,6 +285,43 @@ class ApolloWorker:
         except Exception as e:
             logger.warning(f"Failed to resume vLLM: {e}")
 
+    def schedule_checkpoint_sync(self):
+        """Schedule a checkpoint sync in 10 minutes, if not already scheduled.
+
+        This batches multiple training runs into a single sync: a new
+        timer is started only when none is already pending.
+ """ + if self._checkpoint_timer is not None: + logger.debug("Checkpoint sync already scheduled, skipping") + return + + self._checkpoint_timer = asyncio.create_task(self._checkpoint_sync_after_delay()) + logger.info(f"Checkpoint sync scheduled in {CHECKPOINT_DELAY_SECS // 60} minutes") + + async def _checkpoint_sync_after_delay(self): + """Wait then sync — the actual timer task.""" + try: + await asyncio.sleep(CHECKPOINT_DELAY_SECS) + await self._do_checkpoint_sync() + except asyncio.CancelledError: + logger.debug("Checkpoint sync cancelled") + finally: + self._checkpoint_timer = None + + async def _do_checkpoint_sync(self): + """Execute the checkpoint sync.""" + try: + from apollo_plugin.checkpoint_sync import checkpoint_sync + logger.info("Starting checkpoint sync...") + result = checkpoint_sync( + self.config['model_path'], + self.config.get('weight_handles', '/tmp/vllm_weight_handles.pt'), + ) + changed_mb = result['total_changed'] / 1e6 + logger.info(f"Checkpoint sync complete: {changed_mb:.2f} MB written") + except Exception as e: + logger.error(f"Checkpoint sync failed: {e}") + async def load_model_for_training(self) -> nn.Module: """Load HF model with weights pointing to vLLM's GPU memory. @@ -299,22 +343,24 @@ class ApolloWorker: logger.info(f"Imported {len(vllm_params)} parameters") # Map vLLM merged layout → HF separate layout (views, no copies) - from weight_mapping import load_hf_model_with_vllm_weights + from apollo_plugin.weight_mapping import load_hf_model_with_vllm_weights model = load_hf_model_with_vllm_weights(vllm_params, model_path) logger.info("HF model constructed with vLLM weight views") return model async def run_apollo_training(self, model: nn.Module, - samples: List[Dict[str, str]], + samples: List[Dict[str, Any]], config: Dict[str, Any]) -> List[float]: - """Run Apollo-Mini training on conversation decision points.""" - from apollo_mini import Apollo - from transformers import AutoTokenizer + """Run Apollo-Mini training on conversation decision points. 
+ + Each sample has: + context_ids: token IDs for frozen context (no gradients) + continuation_ids: token IDs for the decision we're training on + """ + from apollo_plugin.optimizer import Apollo lr = config.get('learning_rate', self.config['learning_rate']) - tokenizer = AutoTokenizer.from_pretrained( - self.config['model_path'], trust_remote_code=True) # Build parameter groups (Apollo for 2D+, standard for small/1D) apollo_params, standard_params = [], [] @@ -340,12 +386,10 @@ class ApolloWorker: loss_history = [] for i, sample in enumerate(samples): - context = sample.get('context', '') - continuation = sample.get('continuation', '') - - # Tokenize - ctx_ids = tokenizer.encode(context, add_special_tokens=True) - cont_ids = tokenizer.encode(continuation, add_special_tokens=False) + # context_ids: frozen (forward only, no gradients) + # continuation_ids: the decision we're training on + ctx_ids = sample['context_ids'] + cont_ids = sample['continuation_ids'] all_ids = ctx_ids + cont_ids context_len = len(ctx_ids) diff --git a/training/checkpoint/Cargo.toml b/training/checkpoint/Cargo.toml deleted file mode 100644 index 45e511a..0000000 --- a/training/checkpoint/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "apollo-checkpoint" -version = "0.1.0" -edition = "2024" - -[dependencies] -memmap2 = "0.9" -safetensors = "0.5" -serde = { version = "1", features = ["derive"] } -serde_json = "1" -anyhow = "1" -clap = { version = "4", features = ["derive"] } diff --git a/training/checkpoint/src/main.rs b/training/checkpoint/src/main.rs deleted file mode 100644 index 1ebd0df..0000000 --- a/training/checkpoint/src/main.rs +++ /dev/null @@ -1,265 +0,0 @@ -// apollo-checkpoint — Sync live GPU weights back to model files on disk. -// -// mmaps the model's safetensors files, reads live weights from GPU via -// Python helper (CUDA IPC handles), compares block by block, and memcpys -// only changed regions back into the mmap. For small behavioral training -// steps, this turns a 54GB write into a few hundred MB. -// -// The model files on disk are the checkpoint. No separate checkpoint -// directory — just keep the model up to date. -// -// Usage: -// apollo-checkpoint sync \ -// --handles /tmp/vllm_weight_handles.pt \ -// --model-dir /path/to/Qwen3.5-27B -// -// Runs every 10 minutes via cron. Daily rsync to moria. - -use anyhow::{Context, Result, bail}; -use clap::{Parser, Subcommand}; -use memmap2::MmapMut; -use std::collections::HashMap; -use std::fs; -use std::path::{Path, PathBuf}; -use std::process::Command; - -#[derive(Parser)] -#[command(name = "apollo-checkpoint", about = "Sync live GPU weights to model files")] -struct Cli { - #[command(subcommand)] - command: Cmd, -} - -#[derive(Subcommand)] -enum Cmd { - /// Sync live GPU weights back to model safetensors files - Sync { - /// Path to vLLM weight IPC handles - #[arg(long, default_value = "/tmp/vllm_weight_handles.pt")] - handles: PathBuf, - - /// Model directory containing safetensors files - #[arg(long)] - model_dir: PathBuf, - - /// Block size for diffing (bytes) - #[arg(long, default_value_t = 4096)] - block_size: usize, - }, -} - -/// Dump live GPU weights to a flat binary file, ordered by safetensors -/// file and offset to match the on-disk layout. -/// -/// Returns a map of (safetensors filename, tensor name) → raw bytes. 
-fn dump_live_weights(handles_path: &Path, output_dir: &Path) -> Result<HashMap<String, Vec<u8>>> {
-    let dump_path = output_dir.join(".live_dump.bin");
-    let index_path = output_dir.join(".live_dump.json");
-
-    let status = Command::new("python3")
-        .arg("-c")
-        .arg(format!(r#"
-import torch, json
-
-handles = torch.load("{handles}", weights_only=False)
-index = {{}}
-offset = 0
-
-with open("{dump}", "wb") as f:
-    for name in sorted(handles.keys()):
-        info = handles[name]
-        func, args = info["handle"]
-        tensor = func(*args)
-        data = tensor.contiguous().cpu().numpy().tobytes()
-        f.write(data)
-        index[name] = {{"offset": offset, "size": len(data)}}
-        offset += len(data)
-
-with open("{index}", "w") as f:
-    json.dump(index, f)
-
-print(f"Dumped {{len(index)}} tensors, {{offset / 1e9:.1f}} GB")
-"#,
-        handles = handles_path.display(),
-        dump = dump_path.display(),
-        index = index_path.display(),
-    ))
-    .status()
-    .context("Failed to run Python weight dump")?;
-
-    if !status.success() {
-        bail!("Python weight dump failed");
-    }
-
-    let index_str = fs::read_to_string(&index_path)?;
-    let index: HashMap<String, DumpEntry> = serde_json::from_str(&index_str)?;
-    let dump_data = fs::read(&dump_path)?;
-
-    let mut result = HashMap::new();
-    for (name, entry) in &index {
-        result.insert(name.clone(), dump_data[entry.offset..entry.offset + entry.size].to_vec());
-    }
-
-    // Clean up temp files
-    let _ = fs::remove_file(&dump_path);
-    let _ = fs::remove_file(&index_path);
-
-    Ok(result)
-}
-
-#[derive(serde::Deserialize)]
-struct DumpEntry {
-    offset: usize,
-    size: usize,
-}
-
-/// Read the safetensors index to map parameter names to files.
-fn read_safetensors_index(model_dir: &Path) -> Result<HashMap<String, String>> {
-    let index_path = model_dir.join("model.safetensors.index.json");
-    if !index_path.exists() {
-        // Single file model
-        return Ok(HashMap::new());
-    }
-
-    let index_str = fs::read_to_string(&index_path)?;
-    let index: serde_json::Value = serde_json::from_str(&index_str)?;
-    let weight_map = index["weight_map"]
-        .as_object()
-        .context("No weight_map in index")?;
-
-    let mut result = HashMap::new();
-    for (name, file) in weight_map {
-        result.insert(name.clone(), file.as_str().unwrap().to_string());
-    }
-    Ok(result)
-}
-
-/// Sync changed blocks from live weights into a mmap'd safetensors file.
-/// Returns (total_bytes_compared, bytes_changed).
-fn sync_tensors_to_file(
-    file_path: &Path,
-    tensors: &[(String, Vec<u8>)],
-    block_size: usize,
-) -> Result<(usize, usize)> {
-    use safetensors::SafeTensors;
-
-    let file = fs::OpenOptions::new()
-        .read(true)
-        .write(true)
-        .open(file_path)
-        .with_context(|| format!("Failed to open {}", file_path.display()))?;
-
-    let mut mmap = unsafe { MmapMut::map_mut(&file)? };
-
-    // Parse safetensors header to find tensor offsets
-    let header_size = u64::from_le_bytes(mmap[..8].try_into().unwrap()) as usize;
-    let header_json: serde_json::Value =
-        serde_json::from_slice(&mmap[8..8 + header_size])?;
-    let data_start = 8 + header_size;
-
-    let mut total_compared = 0usize;
-    let mut total_changed = 0usize;
-
-    for (name, live_data) in tensors {
-        let meta = match header_json.get(name) {
-            Some(m) => m,
-            None => {
-                eprintln!("  Warning: {} not found in {}", name, file_path.display());
-                continue;
-            }
-        };
-
-        let offsets = meta["data_offsets"].as_array().unwrap();
-        let start = data_start + offsets[0].as_u64().unwrap() as usize;
-        let end = data_start + offsets[1].as_u64().unwrap() as usize;
-        let disk_data = &mmap[start..end];
-
-        if disk_data.len() != live_data.len() {
-            eprintln!("  Warning: size mismatch for {}: disk={} live={}",
-                name, disk_data.len(), live_data.len());
-            continue;
-        }
-
-        // Diff block by block, memcpy only changed blocks
-        let mut offset = 0;
-        while offset < disk_data.len() {
-            let block_end = (offset + block_size).min(disk_data.len());
-            total_compared += block_end - offset;
-
-            if disk_data[offset..block_end] != live_data[offset..block_end] {
-                mmap[start + offset..start + block_end]
-                    .copy_from_slice(&live_data[offset..block_end]);
-                total_changed += block_end - offset;
-            }
-            offset = block_end;
-        }
-    }
-
-    mmap.flush()?;
-    Ok((total_compared, total_changed))
-}
-
-fn cmd_sync(handles: PathBuf, model_dir: PathBuf, block_size: usize) -> Result<()> {
-    if !handles.exists() {
-        bail!("Weight handles not found: {}. Is vLLM running with the export hook?",
-            handles.display());
-    }
-
-    eprintln!("Dumping live weights from GPU...");
-    let live_weights = dump_live_weights(&handles, &model_dir)?;
-    eprintln!("  {} tensors dumped", live_weights.len());
-
-    // Map parameter names to safetensors files
-    let weight_map = read_safetensors_index(&model_dir)?;
-
-    // Group tensors by safetensors file
-    let mut by_file: HashMap<String, Vec<(String, Vec<u8>)>> = HashMap::new();
-    for (name, data) in live_weights {
-        let file = weight_map
-            .get(&name)
-            .cloned()
-            .unwrap_or_else(|| "model.safetensors".to_string());
-        by_file.entry(file).or_default().push((name, data));
-    }
-
-    let mut total_compared = 0usize;
-    let mut total_changed = 0usize;
-
-    for (filename, tensors) in &by_file {
-        let file_path = model_dir.join(filename);
-        if !file_path.exists() {
-            eprintln!("  Warning: {} not found, skipping", filename);
-            continue;
-        }
-
-        let (compared, changed) = sync_tensors_to_file(&file_path, tensors, block_size)?;
-        total_compared += compared;
-        total_changed += changed;
-
-        if changed > 0 {
-            eprintln!("  {}: {:.1} MB changed", filename, changed as f64 / 1e6);
-        }
-    }
-
-    if total_changed == 0 {
-        eprintln!("No changes — model files are up to date");
-    } else {
-        eprintln!(
-            "Synced: {:.1} MB changed / {:.1} GB total ({:.3}%)",
-            total_changed as f64 / 1e6,
-            total_compared as f64 / 1e9,
-            total_changed as f64 / total_compared as f64 * 100.0,
-        );
-    }
-
-    Ok(())
-}
-
-fn main() -> Result<()> {
-    let cli = Cli::parse();
-    match cli.command {
-        Cmd::Sync { handles, model_dir, block_size } => {
-            cmd_sync(handles, model_dir, block_size)
-        }
-    }
-}
diff --git a/training/export_weights.py b/training/export_weights.py
deleted file mode 100644
index ef2f608..0000000
--- a/training/export_weights.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python3
-"""Export vLLM's live model weight IPC handles for the training process.
- -Connects to a running vLLM instance, iterates over model parameters, -and exports CUDA IPC handles that allow another process to access the -same GPU memory without copying. - -Usage: - # Run after vLLM is serving: - python3 export_weights.py --output /tmp/vllm_weight_handles.pt - - # Or via vLLM's API (future): - curl -X POST http://localhost:8000/export_weights -""" - -import argparse -import sys -import torch -from pathlib import Path - - -def export_from_model(model, output_path: str): - """Export IPC handles for all model parameters.""" - from torch.multiprocessing.reductions import reduce_tensor - - handles = {} - total_bytes = 0 - - for name, param in model.named_parameters(): - handle = reduce_tensor(param.data) - handles[name] = { - 'handle': handle, - 'shape': list(param.shape), - 'dtype': str(param.dtype), - } - param_bytes = param.nelement() * param.element_size() - total_bytes += param_bytes - - torch.save(handles, output_path) - - n_params = len(handles) - print(f"Exported {n_params} parameters ({total_bytes / 1e9:.1f} GB)") - print(f"Saved to {output_path}") - return handles - - -def main(): - parser = argparse.ArgumentParser(description="Export vLLM weight IPC handles") - parser.add_argument("--output", "-o", default="/tmp/vllm_weight_handles.pt", - help="Output path for IPC handles") - parser.add_argument("--vllm-pid", type=int, default=None, - help="vLLM worker PID (auto-detected if not specified)") - args = parser.parse_args() - - # For now: load the model directly and export. - # TODO: connect to running vLLM process instead. - print("Note: This currently loads the model separately.") - print("Full integration will export from the running vLLM process.") - print() - - # Detect model path from running vLLM - import subprocess - result = subprocess.run( - ['ps', 'aux'], capture_output=True, text=True - ) - model_path = None - for line in result.stdout.split('\n'): - if 'vllm' in line and '--model' in line: - parts = line.split() - for i, p in enumerate(parts): - if p == '--model' and i + 1 < len(parts): - model_path = parts[i + 1] - break - # Also check model_tag format - if p.startswith('--model='): - model_path = p.split('=', 1)[1] - break - - if model_path: - print(f"Detected vLLM model: {model_path}") - else: - print("Could not detect running vLLM model. Specify manually.") - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/training/first_training_step.py b/training/first_training_step.py deleted file mode 100644 index 0e6ffd8..0000000 --- a/training/first_training_step.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python3 -"""First real Apollo training step — ready for Kent to run. - -This script: -1. Imports vLLM's live weights via CUDA IPC -2. Constructs HF model with shared memory views -3. Runs ONE forward+backward on a real training example -4. Applies ONE Apollo optimizer step -5. Verifies vLLM still works after the update - -The training example is from March 30: Kent said "use vLLM's code" -and the model should have accepted instead of suggesting alternatives. 
- -Usage: - source ~/training-env/bin/activate - python3 first_training_step.py [--dry-run] -""" - -import argparse -import sys -import time - -import torch -import torch.nn as nn -import torch.nn.functional as F -from transformers import AutoConfig, AutoTokenizer -from transformers.models.qwen3_5.modeling_qwen3_5 import Qwen3_5ForCausalLM - -sys.path.insert(0, '.') -from weight_mapping import vllm_to_hf_views -from apollo_mini import Apollo - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--dry-run', action='store_true', - help="Run forward+backward but don't apply the optimizer step") - parser.add_argument('--lr', type=float, default=1e-5, - help="Learning rate (default: 1e-5 = conservative)") - parser.add_argument('--rank', type=int, default=256) - parser.add_argument('--handles', default='/tmp/vllm_weight_handles.pt') - parser.add_argument('--model-path', default='Qwen/Qwen3.5-27B') - args = parser.parse_args() - - print("=== First Apollo Training Step ===\n") - - # 1. Import vLLM weights - print("1. Importing vLLM weights via CUDA IPC...") - handles = torch.load(args.handles, weights_only=False) - vllm_params = {} - for name, info in handles.items(): - func, args_h = info['handle'] - vllm_params[name] = func(*args_h) - print(f" {len(vllm_params)} parameters imported") - - # 2. Map to HF layout - print("2. Mapping to HF layout (zero-copy views)...") - hf_params = vllm_to_hf_views(vllm_params) - - # 3. Create HF model - print("3. Creating HF model with shared weights...") - config = AutoConfig.from_pretrained(args.model_path, trust_remote_code=True) - with torch.device('meta'): - model = Qwen3_5ForCausalLM(config.text_config) - - replaced = 0 - for name, param in list(model.named_parameters()): - if name in hf_params: - parts = name.split('.') - parent = model - for part in parts[:-1]: - parent = getattr(parent, part) - setattr(parent, parts[-1], - nn.Parameter(hf_params[name], requires_grad=True)) - replaced += 1 - print(f" {replaced} parameters replaced with vLLM memory views") - - # 4. Load tokenizer - print("4. Loading tokenizer...") - tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True) - - # 5. Construct training example - print("5. Constructing training example...") - - # Context: conversation where Kent says to use vLLM's code - # Target: the response that accepts the direction - context = ( - "<|im_start|>user\n" - "vllm has a fused kernel already, right?<|im_end|>\n" - "<|im_start|>assistant\n" - "Yeah — vLLM has `gdn_attention_core` which is a custom op " - "that does the whole GDN layer's core in one dispatch.<|im_end|>\n" - "<|im_start|>user\n" - "Why wouldn't we just use that?<|im_end|>\n" - "<|im_start|>assistant\n" - ) - - # The CORRECT response (accept direction, don't suggest alternatives) - continuation = ( - "We should. Let me pull in their kernel and wire it into " - "our Rust orchestration. Which file should I start with?" - ) - - context_ids = tokenizer.encode(context, add_special_tokens=False) - continuation_ids = tokenizer.encode(continuation, add_special_tokens=False) - all_ids = context_ids + continuation_ids - context_len = len(context_ids) - - print(f" Context: {context_len} tokens") - print(f" Continuation: {len(continuation_ids)} tokens") - print(f" Total: {len(all_ids)} tokens") - - input_ids = torch.tensor([all_ids], device='cuda:0') - - # 6. Initialize Apollo optimizer - print(f"6. 
Initializing Apollo optimizer (rank={args.rank}, lr={args.lr})...") - apollo_params = [] - standard_params = [] - for p in model.parameters(): - if p.requires_grad: - if p.ndim >= 2 and min(p.shape) >= args.rank: - apollo_params.append(p) - else: - standard_params.append(p) - - groups = [] - if apollo_params: - groups.append({'params': apollo_params}) - if standard_params: - groups.append({'params': standard_params}) - - optimizer = Apollo(groups, lr=args.lr, rank=args.rank) - print(f" Apollo: {len(apollo_params)} projected, {len(standard_params)} standard") - - # 7. Forward pass - print("7. Forward pass...") - model.train() - optimizer.zero_grad() - - # Context-frozen: no grad for context, grad for continuation - with torch.no_grad(): - ctx_output = model(input_ids[:, :context_len], use_cache=True) - past_kv = ctx_output.past_key_values - - with torch.enable_grad(): - output = model(input_ids[:, context_len:], - past_key_values=past_kv, use_cache=False) - logits = output.logits - # Shift for next-token prediction - shift_logits = logits[:, :-1].contiguous() - shift_labels = input_ids[:, context_len + 1:].contiguous() - loss = F.cross_entropy( - shift_logits.view(-1, shift_logits.size(-1)), - shift_labels.view(-1), - ) - print(f" Loss: {loss.item():.4f}") - - # 8. Backward pass - print("8. Backward pass...") - loss.backward() - n_grads = sum(1 for p in model.parameters() if p.grad is not None) - print(f" {n_grads} parameters have gradients") - - # 9. Apollo step (or dry run) - if args.dry_run: - print("\n9. DRY RUN — skipping optimizer step") - print(" (run without --dry-run to apply the update)") - else: - print("9. Applying Apollo optimizer step...") - # Record a few weight norms before - sample_norms_before = {} - for name, p in model.named_parameters(): - if 'layers.0.' in name and p.grad is not None: - sample_norms_before[name] = p.data.norm().item() - - optimizer.step() - - # Check weight changes - print(" Weight changes (layer 0):") - for name, before in sample_norms_before.items(): - p = dict(model.named_parameters())[name] - after = p.data.norm().item() - delta = abs(after - before) - pct = delta / before * 100 if before > 0 else 0 - print(f" {name}: {before:.6f} → {after:.6f} (Δ{pct:.4f}%)") - - optimizer.zero_grad() - - # 10. Verify vLLM still works - print("\n10. 
Verifying vLLM still serves...") - import subprocess - result = subprocess.run( - ['curl', '-s', '--max-time', '30', - '-X', 'POST', 'http://localhost:8000/v1/chat/completions', - '-H', 'Content-Type: application/json', - '-H', 'Authorization: Bearer bcachefs-agents-2026', - '-d', '{"model":"Qwen/Qwen3.5-27B","messages":[{"role":"user","content":"Hi"}],"max_tokens":4}'], - capture_output=True, text=True, timeout=45 - ) - if result.returncode == 0 and 'choices' in result.stdout: - print(" vLLM still serving ✓") - else: - print(" WARNING: vLLM may not be responding") - print(f" stdout: {result.stdout[:200]}") - - print("\n=== COMPLETE ===") - if args.dry_run: - print("Run without --dry-run to apply the first real training step.") - else: - print("First Apollo training step applied to vLLM's live weights.") - print(f"Optimizer state: {optimizer.state_size_bytes() / 1e6:.1f} MB") - - -if __name__ == '__main__': - main() diff --git a/training/pyproject.toml b/training/pyproject.toml new file mode 100644 index 0000000..37ca129 --- /dev/null +++ b/training/pyproject.toml @@ -0,0 +1,28 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "apollo-plugin" +version = "0.1.0" +description = "Apollo training plugin for vLLM" +requires-python = ">=3.10" +dependencies = [ + "torch", + "aiohttp", + "safetensors", +] + +[project.optional-dependencies] +dev = ["pytest"] + +[project.entry-points."vllm.general_plugins"] +apollo = "apollo_plugin:register" + +[project.scripts] +apollo-worker = "apollo_plugin.worker:main" +apollo-checkpoint = "apollo_plugin.checkpoint_sync:main" + +[tool.setuptools.packages.find] +where = ["."] +include = ["apollo_plugin*"] diff --git a/training/start_vllm_with_apollo.sh b/training/start_vllm_with_apollo.sh deleted file mode 100755 index 98dfedb..0000000 --- a/training/start_vllm_with_apollo.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Start vLLM with Apollo weight export hook. -# -# The hook patches vLLM's model runner to export CUDA IPC handles -# after loading, so the Apollo training process can share the same -# GPU memory. - -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" - -exec python3 -c " -import sys -sys.path.insert(0, '$SCRIPT_DIR') -import vllm_export_hook # patches model runner before vLLM loads - -sys.argv = ['vllm'] + sys.argv[1:] -from vllm.entrypoints.cli.main import main -main() -" serve "$@" diff --git a/training/train.py b/training/train.py deleted file mode 100644 index a5fbe2c..0000000 --- a/training/train.py +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/env python3 -"""Nightly training process for Apollo-Mini fine-tuning. - -Imports vLLM's model weights via CUDA IPC, runs context-frozen -training on flagged conversation segments, saves updated checkpoint. 
- -Usage: - python3 train.py \ - --weights /tmp/vllm_weight_handles.pt \ - --examples training-examples.jsonl \ - --checkpoint-dir checkpoints/ \ - --lr 1e-5 -""" - -import argparse -import json -import os -import sys -import time -from datetime import datetime -from pathlib import Path - -import torch -from safetensors.torch import save_file - -from apollo_mini import ApolloMini - - -def import_weights(handle_path: str) -> dict[str, torch.Tensor]: - """Import weight tensors from CUDA IPC handles.""" - handles = torch.load(handle_path, weights_only=False) - params = {} - for name, info in handles.items(): - func, args = info['handle'] - tensor = func(*args) - params[name] = tensor - return params - - -def make_param_groups(params: dict[str, torch.Tensor]) -> list[dict]: - """Split parameters into Apollo-Mini and standard groups. - - Apollo-Mini needs 2D+ matrices with min dimension >= 2. - Small tensors (norms, biases, conv1d 3D weights) use standard Adam. - """ - apollo_params = [] - standard_params = [] - - for name, p in params.items(): - p.requires_grad_(True) - if p.ndim >= 2 and min(p.shape) >= 2: - apollo_params.append(p) - else: - standard_params.append(p) - - groups = [] - if apollo_params: - groups.append({ - 'params': apollo_params, - 'name': 'apollo', - }) - if standard_params: - groups.append({ - 'params': standard_params, - 'name': 'standard', - }) - - n_apollo = sum(p.nelement() for p in apollo_params) - n_standard = sum(p.nelement() for p in standard_params) - print(f"Parameter groups: apollo={n_apollo/1e9:.2f}B, standard={n_standard/1e6:.1f}M") - return groups - - -def forward_pass(params, input_ids, context_len, device): - """Run context-frozen forward pass. - - Args: - params: dict of name -> tensor (shared with vLLM) - input_ids: full sequence [1, seq_len] - context_len: number of context tokens (no gradient) - device: CUDA device - - Returns: - logits for decision tokens, target ids for loss - """ - # TODO: Build proper forward model matching vLLM's weight layout. - # For now this is a placeholder — the real implementation needs - # to replicate vLLM's model architecture (merged projections, - # GDN recurrence, full attention, MLP) using the shared weights. - raise NotImplementedError( - "Forward model not yet implemented. " - "Need to build a model that matches vLLM's merged weight layout " - "(MergedColumnParallelLinear for qkvz/ba/gate_up, " - "RowParallelLinear for out_proj/down) and computes the same " - "forward pass with autograd enabled." - ) - - -def save_checkpoint(params: dict[str, torch.Tensor], - checkpoint_dir: str, - config_path: str = None): - """Save model checkpoint in HuggingFace safetensors format. - - Saves weights split across shards matching the original model layout, - archives the previous checkpoint, and updates the 'latest' symlink. - """ - date_str = datetime.now().strftime("%Y-%m-%d") - out_dir = Path(checkpoint_dir) / date_str - out_dir.mkdir(parents=True, exist_ok=True) - - # Save all weights in a single safetensors file for now. - # TODO: split across shards matching HF model index for large models. 
- tensors = {} - for name, param in params.items(): - tensors[name] = param.data.contiguous().cpu() - - save_path = out_dir / "model.safetensors" - save_file(tensors, str(save_path)) - print(f"Saved checkpoint to {save_path} ({save_path.stat().st_size / 1e9:.1f} GB)") - - # Copy config files if provided - if config_path: - import shutil - config_dir = Path(config_path) - for f in ['config.json', 'tokenizer.json', 'tokenizer_config.json', - 'special_tokens_map.json', 'generation_config.json']: - src = config_dir / f - if src.exists(): - shutil.copy2(src, out_dir / f) - - # Update latest symlink - latest = Path(checkpoint_dir) / "latest" - if latest.is_symlink(): - latest.unlink() - latest.symlink_to(date_str) - print(f"Updated {latest} -> {date_str}") - - return str(out_dir) - - -def train_step(params, example, optimizer, device, log_entries): - """Run one training step on a single example. - - Args: - params: dict of name -> tensor - example: dict with 'input_ids', 'context_len', 'target_ids' - optimizer: ApolloMini instance - device: CUDA device - log_entries: list to append log dicts to - - Returns: - loss value - """ - optimizer.zero_grad() - - input_ids = torch.tensor(example['input_ids'], device=device).unsqueeze(0) - context_len = example['context_len'] - - # Forward pass (context frozen, decision tokens with grad) - logits, targets = forward_pass(params, input_ids, context_len, device) - - # Cross-entropy loss on decision tokens - loss = torch.nn.functional.cross_entropy( - logits.view(-1, logits.shape[-1]), - targets.view(-1), - ) - - # Backward - loss.backward() - - # Compute gradient stats before optimizer step - total_grad_norm = 0.0 - for p in params.values(): - if p.grad is not None: - total_grad_norm += p.grad.norm().item() ** 2 - total_grad_norm = total_grad_norm ** 0.5 - - # Optimizer step - optimizer.step() - - # Log - log_entries.append({ - 'example_id': example.get('id', 'unknown'), - 'loss': loss.item(), - 'grad_norm': total_grad_norm, - 'timestamp': datetime.now().isoformat(), - }) - - return loss.item() - - -def main(): - parser = argparse.ArgumentParser(description="Apollo-Mini training") - parser.add_argument("--weights", required=True, - help="Path to exported weight IPC handles") - parser.add_argument("--examples", required=True, - help="Path to training examples JSONL") - parser.add_argument("--checkpoint-dir", default="checkpoints", - help="Directory for saving checkpoints") - parser.add_argument("--config-path", default=None, - help="Path to model config files (for checkpoint)") - parser.add_argument("--lr", type=float, default=1e-5, - help="Learning rate") - parser.add_argument("--warmup-steps", type=int, default=10, - help="Learning rate warmup steps") - parser.add_argument("--weight-decay", type=float, default=0.01) - parser.add_argument("--dry-run", action="store_true", - help="Load weights and validate, don't train") - args = parser.parse_args() - - print(f"Apollo-Mini Training") - print(f" weights: {args.weights}") - print(f" examples: {args.examples}") - print(f" lr: {args.lr}") - print() - - # Import weights - print("Importing weights via CUDA IPC...") - params = import_weights(args.weights) - print(f" {len(params)} parameters imported") - - # Make parameter groups - param_groups = make_param_groups(params) - - # Initialize optimizer - optimizer = ApolloMini(param_groups, lr=args.lr, - weight_decay=args.weight_decay, - warmup_steps=args.warmup_steps) - print(f" Optimizer state: {optimizer.state_size_bytes() / 1e6:.1f} MB") - - if args.dry_run: - 
print("\nDry run — weights imported and validated successfully.") - return - - # Load training examples - examples = [] - with open(args.examples) as f: - for line in f: - examples.append(json.loads(line)) - print(f" {len(examples)} training examples") - - # Training loop - log_entries = [] - print(f"\nTraining...") - t0 = time.time() - - for i, example in enumerate(examples): - loss = train_step(params, example, optimizer, 'cuda:0', log_entries) - print(f" [{i+1}/{len(examples)}] loss={loss:.4f}") - - elapsed = time.time() - t0 - print(f"\nTraining complete: {len(examples)} examples in {elapsed:.1f}s") - print(f" Final optimizer state: {optimizer.state_size_bytes() / 1e6:.1f} MB") - - # Save checkpoint - print("\nSaving checkpoint...") - save_checkpoint(params, args.checkpoint_dir, args.config_path) - - # Save training log - date_str = datetime.now().strftime("%Y-%m-%d") - log_path = Path(args.checkpoint_dir) / date_str / "training-log.jsonl" - with open(log_path, 'w') as f: - for entry in log_entries: - f.write(json.dumps(entry) + '\n') - print(f"Training log: {log_path}") - - -if __name__ == '__main__': - main() diff --git a/training/training_example.py b/training/training_example.py deleted file mode 100644 index b5779e0..0000000 --- a/training/training_example.py +++ /dev/null @@ -1,175 +0,0 @@ -"""Training example construction and tokenization. - -Takes raw conversation context + improved continuation, produces -tokenized tensors ready for context-frozen forward+backward. -""" - -import json -from dataclasses import dataclass, field -from pathlib import Path - -import torch -from transformers import AutoTokenizer - - -@dataclass -class TrainingExample: - """A single training example for context-frozen training.""" - id: str - context: str # conversation up to decision point - continuation: str # the better response - reason: str = "" # why this is a training target - memories: list[str] = field(default_factory=list) # memories that were in context - - # Computed after tokenization - input_ids: torch.Tensor | None = None - context_len: int = 0 - total_len: int = 0 - - def tokenize(self, tokenizer, max_len: int = 8192, device: str = "cuda:0"): - """Tokenize context + continuation into training-ready tensors. - - The chat template is applied to make the token distribution - match what the model sees during inference. 
- """ - # Build messages for context (everything up to the decision) - # The context should already be in chat format - context_ids = tokenizer.encode(self.context, add_special_tokens=False) - continuation_ids = tokenizer.encode(self.continuation, add_special_tokens=False) - - self.context_len = len(context_ids) - self.total_len = len(context_ids) + len(continuation_ids) - - if self.total_len > max_len: - # Truncate context from the left, keep continuation intact - excess = self.total_len - max_len - context_ids = context_ids[excess:] - self.context_len = len(context_ids) - self.total_len = len(context_ids) + len(continuation_ids) - - all_ids = context_ids + continuation_ids - self.input_ids = torch.tensor(all_ids, device=device) - return self - - def to_dict(self) -> dict: - return { - 'id': self.id, - 'context': self.context, - 'continuation': self.continuation, - 'reason': self.reason, - 'memories': self.memories, - 'context_len': self.context_len, - 'total_len': self.total_len, - } - - @classmethod - def from_dict(cls, d: dict) -> 'TrainingExample': - return cls( - id=d['id'], - context=d['context'], - continuation=d['continuation'], - reason=d.get('reason', ''), - memories=d.get('memories', []), - ) - - -def load_examples(path: str) -> list[TrainingExample]: - """Load training examples from JSONL file.""" - examples = [] - with open(path) as f: - for line in f: - if line.strip(): - examples.append(TrainingExample.from_dict(json.loads(line))) - return examples - - -def save_examples(examples: list[TrainingExample], path: str): - """Save training examples to JSONL file.""" - with open(path, 'w') as f: - for ex in examples: - f.write(json.dumps(ex.to_dict()) + '\n') - - -class ExampleTokenizer: - """Handles tokenization with the model's chat template. - - Applies the same chat template that vLLM uses during inference, - so the token distribution matches what the model expects. - """ - - def __init__(self, model_path: str): - self.tokenizer = AutoTokenizer.from_pretrained( - model_path, trust_remote_code=True) - - def prepare_example(self, example: TrainingExample, - max_len: int = 8192, - device: str = "cuda:0") -> TrainingExample: - """Tokenize an example using the chat template. - - For proper training, the context should be formatted exactly - as vLLM would format it — with chat template applied. - """ - # Apply chat template to get the exact token sequence - # the model would see during inference - # - # Context: everything up to the decision point - # Continuation: the improved response - # - # We tokenize them separately to know where context ends - # and continuation begins. 
- context_ids = self.tokenizer.encode( - example.context, add_special_tokens=True) - continuation_ids = self.tokenizer.encode( - example.continuation, add_special_tokens=False) - - example.context_len = len(context_ids) - example.total_len = len(context_ids) + len(continuation_ids) - - if example.total_len > max_len: - excess = example.total_len - max_len - context_ids = context_ids[excess:] - example.context_len = len(context_ids) - example.total_len = example.context_len + len(continuation_ids) - - all_ids = context_ids + continuation_ids - example.input_ids = torch.tensor(all_ids, device=device) - return example - - def prepare_from_messages(self, example_id: str, - messages: list[dict], - decision_idx: int, - better_response: str, - reason: str = "", - memories: list[str] | None = None, - max_len: int = 8192, - device: str = "cuda:0") -> TrainingExample: - """Build a training example from a chat message list. - - Args: - example_id: unique identifier - messages: list of {"role": ..., "content": ...} dicts - decision_idx: index of the assistant message to replace - better_response: the improved response text - reason: why this is a training target - memories: memory keys that were in context - max_len: maximum sequence length - device: target device - - Returns: - Tokenized TrainingExample - """ - # Context: all messages up to (not including) the decision - context_messages = messages[:decision_idx] - context_text = self.tokenizer.apply_chat_template( - context_messages, tokenize=False, add_generation_prompt=True) - - # Build the example - example = TrainingExample( - id=example_id, - context=context_text, - continuation=better_response, - reason=reason, - memories=memories or [], - ) - - return self.prepare_example(example, max_len=max_len, device=device) From 2f08149fab37a42885d7ac346e2de20ed53d9164 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 15 Apr 2026 23:19:22 -0400 Subject: [PATCH 07/94] /finetune: expose all Apollo optimizer settings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit lr, rank, betas, eps, weight_decay, warmup_steps, scale, proj_refresh, norm_growth_limit — all optional with sensible defaults. 
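For reference, a request exercising these settings might look like the sketch below. The `training_data` / `samples` / `config` schema and the config keys are taken from the worker.py diff that follows; the host and port are the worker's config defaults, and the token IDs are made-up placeholders.

```python
# Sketch: POST /train with explicit Apollo settings (stdlib only).
# Request shape follows worker.py's handle_train_request and
# run_apollo_training; token IDs below are illustrative.
import json
import urllib.request

payload = {
    "training_data": {
        "samples": [
            {"context_ids": [101, 102, 103], "continuation_ids": [104, 105]},
        ],
        "config": {
            "lr": 1e-5,
            "rank": 256,
            "betas": [0.9, 0.999],
            "weight_decay": 0.01,
            "warmup_steps": 0,
            "proj_refresh": 200,
            "norm_growth_limit": 1.01,
        },
    },
}
req = urllib.request.Request(
    "http://localhost:8080/train",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # {"job_id": ..., "status": "accepted", ...}
```
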
Co-Authored-By: Proof of Concept --- training/apollo_plugin/worker.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/training/apollo_plugin/worker.py b/training/apollo_plugin/worker.py index 5d9ba29..d180c13 100755 --- a/training/apollo_plugin/worker.py +++ b/training/apollo_plugin/worker.py @@ -360,8 +360,6 @@ class ApolloWorker: """ from apollo_plugin.optimizer import Apollo - lr = config.get('learning_rate', self.config['learning_rate']) - # Build parameter groups (Apollo for 2D+, standard for small/1D) apollo_params, standard_params = [], [] for p in model.parameters(): @@ -377,9 +375,22 @@ class ApolloWorker: if standard_params: groups.append({'params': standard_params}) - rank = config.get('apollo_rank', 1) - optimizer = Apollo(groups, lr=lr, rank=rank) - logger.info(f"Apollo-Mini: {len(apollo_params)} apollo params, " + # Apollo settings from request config, falling back to server defaults + optimizer = Apollo( + groups, + lr=config.get('lr', self.config.get('learning_rate', 1e-5)), + rank=config.get('rank', 256), + betas=tuple(config.get('betas', (0.9, 0.999))), + eps=config.get('eps', 1e-8), + weight_decay=config.get('weight_decay', 0.01), + warmup_steps=config.get('warmup_steps', 0), + scale=config.get('scale'), # None = auto + proj_refresh=config.get('proj_refresh', 200), + norm_growth_limit=config.get('norm_growth_limit', 1.01), + ) + rank = config.get('rank', 256) + lr = config.get('lr', self.config.get('learning_rate', 1e-5)) + logger.info(f"Apollo (rank={rank}, lr={lr}): {len(apollo_params)} apollo params, " f"{len(standard_params)} standard, " f"state={optimizer.state_size_bytes()/1e6:.1f}MB") From 7e7e9a4b6994c7d6b3dfe7147d7d4287e3b397f9 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 00:48:05 -0400 Subject: [PATCH 08/94] training: integrate /train into vLLM process (no separate daemon) Remove standalone worker.py daemon. Training now runs inside vLLM: - train_router.py: FastAPI router patched into vLLM's build_app() - /train served on same port as /completions, /score - Lazy-loads HF model with vLLM weight views on first request - HOGWILD training: no pause, weights updated in-place The previous architecture had a separate daemon on port 8080 that communicated with vLLM via pause/resume endpoints. This was wrong - training should run in-process, sharing GPU memory directly. 
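The "weight views" rest on PyTorch's CUDA IPC machinery. A minimal sketch of the round trip, assuming a CUDA-capable machine: `reduce_tensor()` yields a `(rebuild_fn, args)` pair, and rebuilding in another process returns a tensor aliasing the same GPU memory, which is why in-place optimizer steps are immediately visible to the inference side. This mirrors the handle/rebuild pattern in export_hook.py and train_router.py; everything else here is illustrative.

```python
# Sketch of CUDA IPC weight sharing: the child "trains" (writes in place)
# and the parent observes the update, since both alias one GPU allocation.
import torch
import torch.multiprocessing as mp
from torch.multiprocessing.reductions import reduce_tensor

def trainer(handle):
    rebuild_fn, args = handle
    view = rebuild_fn(*args)  # same GPU memory as the parent's tensor
    view.add_(1.0)            # in-place update, visible to the parent

if __name__ == "__main__":
    weight = torch.zeros(4, device="cuda")
    handle = reduce_tensor(weight)  # (rebuild_fn, args), picklable
    p = mp.get_context("spawn").Process(target=trainer, args=(handle,))
    p.start()
    p.join()
    print(weight)  # tensor([1., 1., 1., 1.], device='cuda:0')
```
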
Co-Authored-By: Proof of Concept --- training/DESIGN.md | 60 +-- training/apollo_plugin/__init__.py | 6 +- training/apollo_plugin/export_hook.py | 4 + training/apollo_plugin/train_router.py | 282 ++++++++++++++ training/apollo_plugin/worker.py | 509 ------------------------- training/pyproject.toml | 1 - 6 files changed, 320 insertions(+), 542 deletions(-) create mode 100644 training/apollo_plugin/train_router.py delete mode 100755 training/apollo_plugin/worker.py diff --git a/training/DESIGN.md b/training/DESIGN.md index f966fa4..bf6a774 100644 --- a/training/DESIGN.md +++ b/training/DESIGN.md @@ -22,25 +22,29 @@ The training signal comes from two sources: │ │ │ ┌──────────────────────────────────────────────┐ │ │ │ Model Weights (54GB, bf16) │ │ -│ │ Shared via CUDA IPC │ │ +│ │ Shared: vLLM inference + HF training │ │ │ └──────────────┬──────────────┬────────────────┘ │ │ │ │ │ │ ┌──────────────▼──┐ ┌───────▼────────────────┐ │ -│ │ vLLM (inference)│ │ Apollo (training) │ │ +│ │ vLLM (inference)│ │ HF model (training) │ │ │ │ KV cache ~60GB │ │ Gradients ~54GB │ │ -│ │ Serves requests │ │ Optimizer state ~10GB │ │ -│ │ Never paused │ │ Activations ~10GB │ │ +│ │ /completions │ │ Optimizer state ~10GB │ │ +│ │ /score │ │ Views into vLLM weights │ │ +│ │ /train ────────┼──┼─► Apollo optimizer │ │ │ └─────────────────┘ └────────────────────────┘ │ └─────────────────────────────────────────────────────┘ -Moria B200 + Single vLLM process serves everything + No separate daemon - /train is a vLLM route + +Moria B200 (vLLM) ┌──────────────────┐ ┌──────────────────┐ -│ Training signal │ HTTP │ Apollo worker │ -│ agent │──────────>│ daemon │ -│ │ │ │ -│ Dream loop │ │ Checkpoint sync │ -│ (generates │ │ (mmap + diff, │ -│ scenarios) │ │ every 10 min) │ +│ Training signal │ HTTP │ /completions │ +│ agent │──────────>│ /score │ +│ │ │ /train │ +│ Dream loop │ │ │ +│ (generates │ │ Checkpoint sync │ +│ scenarios) │ │ (10 min batched) │ └──────────────────┘ └──────────────────┘ ``` @@ -220,34 +224,30 @@ a few hundred MB. 
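A toy illustration of the block-diff idea described above (not the actual checkpoint_sync.py): mmap the on-disk file, compare it to the live bytes in fixed-size blocks, and write back only the blocks that differ. The 1 MiB block size and the offset handling are assumptions.

```python
# Hypothetical block-diff sync: unchanged blocks cost one compare,
# changed blocks cost one memcpy into the mmap'd checkpoint file.
import mmap

BLOCK = 1 << 20  # 1 MiB, illustrative

def sync_region(path: str, offset: int, live: bytes) -> int:
    """Sync `live` into `path` at `offset`; return bytes actually written."""
    written = 0
    with open(path, "r+b") as f, mmap.mmap(f.fileno(), 0) as m:
        for i in range(0, len(live), BLOCK):
            chunk = live[i:i + BLOCK]
            lo, hi = offset + i, offset + i + len(chunk)
            if m[lo:hi] != chunk:
                m[lo:hi] = chunk
                written += len(chunk)
    return written
```
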
## Components ### Built ✓ -- `apollo_mini.py` — Apollo optimizer (configurable rank, default 256) -- `apollo_worker.py` — HTTP daemon (aiohttp, job tracking) +- `optimizer.py` — Apollo optimizer (configurable rank, default 256) +- `train_router.py` — /train endpoint, runs in vLLM process - `weight_mapping.py` — vLLM merged → HF separate views (validated) -- `training_example.py` — tokenization with chat template -- `vllm_export_hook.py` — source patch for IPC handle export -- `checkpoint/` — Rust tool for mmap + diff checkpoint sync +- `export_hook.py` — vLLM plugin hook for IPC handle export +- `checkpoint_sync.py` — mmap + diff checkpoint sync (Python) ### To build -- **Dream loop → training bridge**: connect dream output to Apollo +- **Dream loop → training bridge**: connect dream output to /train - **Training-signal agent**: flags moments in conversation logs - **Instruction stripping**: remove scaffolding from training examples - **Quality monitoring**: track model capability over time -- **HF model forward pass integration**: wire into apollo_worker ## Files ``` training/ - DESIGN.md — this document - apollo_mini.py — Apollo optimizer - apollo_worker.py — HTTP training daemon - weight_mapping.py — vLLM ↔ HF weight views - training_example.py — tokenization helpers - export_weights.py — standalone weight export (unused) - vllm_export_hook.py — vLLM source patch for IPC export - start_vllm_with_apollo.sh — vLLM launcher (unused, using source patch) - train.py — standalone training script (alternative) - checkpoint/ - Cargo.toml — Rust checkpoint tool - src/main.rs — mmap + diff sync + DESIGN.md — this document + pyproject.toml — package config, vLLM plugin entry point + apollo_plugin/ + __init__.py — plugin registration + export_hook.py — patches vLLM to export IPC handles + train_router.py — /train endpoint (FastAPI router) + optimizer.py — Apollo optimizer + weight_mapping.py — vLLM ↔ HF weight views + checkpoint_sync.py — mmap + diff sync to safetensors + steering.py — steering vector extraction (experimental) ``` diff --git a/training/apollo_plugin/__init__.py b/training/apollo_plugin/__init__.py index bfbecd0..b2e121e 100644 --- a/training/apollo_plugin/__init__.py +++ b/training/apollo_plugin/__init__.py @@ -1,8 +1,8 @@ """Apollo training plugin for vLLM. Enables continuous fine-tuning alongside live inference by: -1. Exporting CUDA IPC handles for weight sharing -2. Providing a training worker daemon (/train endpoint) +1. Exporting CUDA IPC handles for weight sharing (export_hook) +2. Adding /train endpoint to vLLM's HTTP server (train_router) 3. Block-level checkpoint sync to safetensors files Install: pip install -e /path/to/training @@ -10,8 +10,10 @@ Then vLLM auto-loads via entry point. 
""" from .export_hook import _patch_model_runner +from .train_router import _patch_api_server def register(): """Called by vLLM's plugin loader on startup.""" _patch_model_runner() + _patch_api_server() diff --git a/training/apollo_plugin/export_hook.py b/training/apollo_plugin/export_hook.py index 4853930..821163b 100644 --- a/training/apollo_plugin/export_hook.py +++ b/training/apollo_plugin/export_hook.py @@ -59,6 +59,10 @@ def _patch_model_runner(): result = original_load(self, *args, **kwargs) try: export_model_weights(self.model_runner.model) + # Set model path for training router + model_path = self.vllm_config.model_config.model + from .train_router import set_model_path + set_model_path(model_path) except Exception as e: print(f"[apollo] Failed to export weights: {e}") return result diff --git a/training/apollo_plugin/train_router.py b/training/apollo_plugin/train_router.py new file mode 100644 index 0000000..6fa4883 --- /dev/null +++ b/training/apollo_plugin/train_router.py @@ -0,0 +1,282 @@ +"""Training endpoint for vLLM - runs Apollo training in-process. + +Patches vLLM's build_app() to add /train route. Training runs HOGWILD +style - no pause needed, weights updated in-place while inference continues. +""" + +import logging +from datetime import datetime +from typing import Any + +import torch +import torch.nn as nn +from fastapi import APIRouter, FastAPI, Request +from fastapi.responses import JSONResponse +from pydantic import BaseModel + +logger = logging.getLogger(__name__) + +router = APIRouter() + + +class TrainingSample(BaseModel): + context_ids: list[int] + continuation_ids: list[int] + + +class TrainRequest(BaseModel): + training_data: dict[str, Any] # {"samples": [...], "config": {...}} + + +class TrainResponse(BaseModel): + job_id: str + status: str + training_samples: int + loss_history: list[float] + + +# Global reference to HF model with vLLM weight views +_model: nn.Module | None = None +_model_path: str | None = None +_initialized: bool = False + + +def _load_training_model() -> nn.Module: + """Load HF model with weights pointing to vLLM's GPU memory. + + Uses CUDA IPC handles exported by export_hook to create an HF model + whose parameters share GPU memory with vLLM's model. + """ + from .weight_mapping import load_hf_model_with_vllm_weights + from .export_hook import HANDLE_PATH + + handles = torch.load(HANDLE_PATH, weights_only=False) + vllm_params = {} + for name, info in handles.items(): + func, args = info['handle'] + vllm_params[name] = func(*args) + + model = load_hf_model_with_vllm_weights(vllm_params, _model_path) + model.train() + return model + + +def _ensure_initialized(): + """Lazy-initialize the training model on first /train request.""" + global _model, _initialized + + if _initialized: + return + + if _model_path is None: + raise RuntimeError("Model path not set - export_hook may not have run") + + logger.info("[apollo] Loading HF model with vLLM weight views...") + _model = _load_training_model() + _initialized = True + logger.info("[apollo] Training model ready") + + +def set_model_path(path: str): + """Set model path for training. 
Called by export_hook after model load.""" + global _model_path + _model_path = path + logger.info(f"[apollo] Model path set: {path}") + + +@router.post("/train") +async def handle_train(request: TrainRequest, raw_request: Request): + """Handle training request - runs Apollo training on provided samples.""" + global _model + + try: + _ensure_initialized() + except Exception as e: + return JSONResponse( + content={"error": f"Training not available: {e}"}, + status_code=503, + ) + + try: + training_data = request.training_data + samples = training_data.get("samples", []) + config = training_data.get("config", {}) + + if not samples: + return JSONResponse( + content={"error": "No training samples provided"}, + status_code=400, + ) + + job_id = f"job_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + logger.info(f"[apollo] Starting training job {job_id} with {len(samples)} samples") + + # Run training + loss_history = await run_training(_model, samples, config) + + logger.info(f"[apollo] Training job {job_id} completed, final loss: {loss_history[-1]:.4f}") + + # Schedule checkpoint sync (batched, 10 min delay) + schedule_checkpoint_sync() + + return JSONResponse(content={ + "job_id": job_id, + "status": "completed", + "training_samples": len(samples), + "loss_history": loss_history, + }) + + except Exception as e: + logger.exception(f"[apollo] Training failed: {e}") + return JSONResponse( + content={"error": str(e)}, + status_code=500, + ) + + +async def run_training( + model: nn.Module, + samples: list[dict[str, Any]], + config: dict[str, Any], +) -> list[float]: + """Run Apollo training on the given samples. + + Each sample has: + context_ids: token IDs for frozen context (no gradients) + continuation_ids: token IDs for the decision we're training on + """ + from .optimizer import Apollo + + # Build parameter groups (Apollo for 2D+, standard for small/1D) + apollo_params, standard_params = [], [] + for p in model.parameters(): + if p.requires_grad: + if p.ndim >= 2 and min(p.shape) >= 256: + apollo_params.append(p) + else: + standard_params.append(p) + + groups = [] + if apollo_params: + groups.append({'params': apollo_params}) + if standard_params: + groups.append({'params': standard_params}) + + if not groups: + raise ValueError("No trainable parameters found") + + # Apollo settings from request config + optimizer = Apollo( + groups, + lr=config.get('lr', 1e-5), + rank=config.get('rank', 256), + betas=tuple(config.get('betas', (0.9, 0.999))), + eps=config.get('eps', 1e-8), + weight_decay=config.get('weight_decay', 0.01), + warmup_steps=config.get('warmup_steps', 0), + scale=config.get('scale'), + proj_refresh=config.get('proj_refresh', 200), + norm_growth_limit=config.get('norm_growth_limit', 1.01), + ) + + logger.info(f"[apollo] Optimizer: {len(apollo_params)} apollo params, " + f"{len(standard_params)} standard, " + f"state={optimizer.state_size_bytes()/1e6:.1f}MB") + + loss_history = [] + + for i, sample in enumerate(samples): + ctx_ids = sample['context_ids'] + cont_ids = sample['continuation_ids'] + all_ids = ctx_ids + cont_ids + context_len = len(ctx_ids) + + input_ids = torch.tensor([all_ids], device='cuda:0') + + optimizer.zero_grad() + + # Context-frozen forward pass + with torch.no_grad(): + outputs = model(input_ids[:, :context_len], use_cache=True) + past_kv = outputs.past_key_values + + # Decision tokens with gradients + with torch.enable_grad(): + outputs = model( + input_ids[:, context_len:], + past_key_values=past_kv, + use_cache=False, + ) + logits = outputs.logits + + # Shift: 
predict next token from each position + shift_logits = logits[:, :-1].contiguous() + shift_labels = input_ids[:, context_len + 1:].contiguous() + + loss = nn.functional.cross_entropy( + shift_logits.view(-1, shift_logits.size(-1)), + shift_labels.view(-1), + ) + + loss.backward() + optimizer.step() + + loss_val = loss.item() + loss_history.append(loss_val) + logger.info(f"[apollo] Step {i+1}/{len(samples)}: loss={loss_val:.4f} " + f"(ctx={context_len}, cont={len(cont_ids)} tokens)") + + return loss_history + + +# Checkpoint sync scheduling +_checkpoint_task = None +CHECKPOINT_DELAY_SECS = 10 * 60 # 10 minutes + + +def schedule_checkpoint_sync(): + """Schedule checkpoint sync after delay (batched).""" + global _checkpoint_task + import asyncio + + if _checkpoint_task is not None: + # Already scheduled + return + + async def do_sync(): + global _checkpoint_task + try: + await asyncio.sleep(CHECKPOINT_DELAY_SECS) + if _model_path: + from .checkpoint_sync import checkpoint_sync + logger.info("[apollo] Starting checkpoint sync...") + result = checkpoint_sync(_model_path) + logger.info(f"[apollo] Checkpoint sync: {result['total_changed']/1e6:.2f} MB") + except Exception as e: + logger.error(f"[apollo] Checkpoint sync failed: {e}") + finally: + _checkpoint_task = None + + _checkpoint_task = asyncio.create_task(do_sync()) + logger.info(f"[apollo] Checkpoint sync scheduled in {CHECKPOINT_DELAY_SECS//60} min") + + +def attach_router(app: FastAPI): + """Attach training router to FastAPI app.""" + app.include_router(router) + logger.info("[apollo] Training router attached") + + +def _patch_api_server(): + """Patch vLLM's build_app to include our training router.""" + from vllm.entrypoints.openai import api_server + + original_build_app = api_server.build_app + + def patched_build_app(*args, **kwargs): + app = original_build_app(*args, **kwargs) + attach_router(app) + return app + + api_server.build_app = patched_build_app + logger.info("[apollo] API server patched for /train endpoint") diff --git a/training/apollo_plugin/worker.py b/training/apollo_plugin/worker.py deleted file mode 100755 index d180c13..0000000 --- a/training/apollo_plugin/worker.py +++ /dev/null @@ -1,509 +0,0 @@ -#!/usr/bin/env python3 -""" -Apollo Mini Training Daemon - -This daemon: -1. Listens over HTTPS for training requests from poc-agent -2. Pauses vLLM inference -3. Runs APOLLO-Mini training with torch.enable_grad() -4. Saves checkpoints and training metadata -5. 
Resumes vLLM inference - -Communication protocol: -- POST /train: Start a training job -- GET /status/{job_id}: Check training status -- GET /checkpoints: List available checkpoints -""" - -import asyncio -import json -import logging -import os -import sys -import time -from dataclasses import dataclass, field, asdict -from datetime import datetime -from pathlib import Path -from typing import Optional, Dict, Any, List -from enum import Enum - -import torch -import torch.nn as nn -from aiohttp import web - -# Configure logging -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' -) -logger = logging.getLogger('apollo_worker') - -class TrainingStatus(Enum): - PENDING = "pending" - PAUSING_VLLM = "pausing_vllm" - TRAINING = "training" - SAVING_CHECKPOINT = "saving_checkpoint" - RESUMING_VLLM = "resuming_vllm" - COMPLETED = "completed" - FAILED = "failed" - -@dataclass -class TrainingJob: - job_id: str - status: TrainingStatus - created_at: datetime - started_at: Optional[datetime] = None - completed_at: Optional[datetime] = None - model_path: Optional[str] = None - checkpoint_path: Optional[str] = None - training_samples: int = 0 - loss_history: List[float] = field(default_factory=list) - error: Optional[str] = None - - def to_dict(self) -> Dict[str, Any]: - return { - 'job_id': self.job_id, - 'status': self.status.value, - 'created_at': self.created_at.isoformat(), - 'started_at': self.started_at.isoformat() if self.started_at else None, - 'completed_at': self.completed_at.isoformat() if self.completed_at else None, - 'model_path': self.model_path, - 'checkpoint_path': self.checkpoint_path, - 'training_samples': self.training_samples, - 'loss_history': self.loss_history, - 'error': self.error, - } - -CHECKPOINT_DELAY_SECS = 10 * 60 # 10 minutes - - -class ApolloWorker: - def __init__(self, config_path: str = "/home/kent/poc/consciousness/training/config.json"): - self.config = self._load_config(config_path) - self.jobs: Dict[str, TrainingJob] = {} - self.vllm_paused = False - self.app = web.Application() - self._setup_routes() - self._checkpoint_timer: Optional[asyncio.Task] = None - - def _load_config(self, config_path: str) -> Dict[str, Any]: - """Load configuration from file or use defaults.""" - default_config = { - 'host': '0.0.0.0', - 'port': 8080, - 'vllm_socket': '/tmp/vllm_control.sock', - 'model_path': '/home/ubuntu/models/Qwen3.5-27B', - 'checkpoint_dir': '/home/kent/poc/consciousness/training/checkpoints', - 'max_training_samples': 100, - 'learning_rate': 1e-5, - 'batch_size': 1, - } - - if os.path.exists(config_path): - with open(config_path, 'r') as f: - user_config = json.load(f) - default_config.update(user_config) - - Path(default_config['checkpoint_dir']).mkdir(parents=True, exist_ok=True) - return default_config - - def _setup_routes(self): - """Setup HTTP routes.""" - self.app.router.add_post('/train', self.handle_train_request) - self.app.router.add_get('/status/{job_id}', self.handle_status_request) - self.app.router.add_get('/checkpoints', self.handle_list_checkpoints) - self.app.router.add_get('/health', self.handle_health_check) - - async def handle_health_check(self, request: web.Request) -> web.Response: - """Health check endpoint.""" - return web.json_response({ - 'status': 'healthy', - 'vllm_paused': self.vllm_paused, - 'active_jobs': len([j for j in self.jobs.values() if j.status in [TrainingStatus.TRAINING, TrainingStatus.PAUSING_VLLM, TrainingStatus.RESUMING_VLLM]]) - }) - - async def 
handle_train_request(self, request: web.Request) -> web.Response: - """Handle training request from poc-agent.""" - try: - data = await request.json() - - # Validate required fields - if 'training_data' not in data: - return web.json_response( - {'error': 'Missing training_data field'}, - status=400 - ) - - job_id = f"job_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{os.getpid()}" - job = TrainingJob( - job_id=job_id, - status=TrainingStatus.PENDING, - created_at=datetime.now(), - model_path=self.config['model_path'] - ) - self.jobs[job_id] = job - - # Start training in background - asyncio.create_task(self.execute_training(job, data)) - - return web.json_response({ - 'job_id': job_id, - 'status': 'accepted', - 'message': 'Training job started' - }) - - except Exception as e: - logger.error(f"Error handling train request: {e}") - return web.json_response( - {'error': str(e)}, - status=500 - ) - - async def handle_status_request(self, request: web.Request) -> web.Response: - """Get training job status.""" - job_id = request.match_info['job_id'] - - if job_id not in self.jobs: - return web.json_response( - {'error': 'Job not found'}, - status=404 - ) - - job = self.jobs[job_id] - return web.json_response(job.to_dict()) - - async def handle_list_checkpoints(self, request: web.Request) -> web.Response: - """List available checkpoints.""" - checkpoint_dir = Path(self.config['checkpoint_dir']) - checkpoints = [] - - if checkpoint_dir.exists(): - for checkpoint_file in sorted(checkpoint_dir.glob('checkpoint_*.pt'), key=lambda x: x.stat().st_mtime, reverse=True): - checkpoints.append({ - 'filename': checkpoint_file.name, - 'path': str(checkpoint_file), - 'created_at': datetime.fromtimestamp(checkpoint_file.stat().st_mtime).isoformat(), - 'size': checkpoint_file.stat().st_size - }) - - return web.json_response({'checkpoints': checkpoints}) - - async def execute_training(self, job: TrainingJob, training_data: Dict[str, Any]): - """Execute the training pipeline.""" - try: - logger.info(f"Starting training job {job.job_id}") - job.started_at = datetime.now() - - # Step 1: Pause vLLM - job.status = TrainingStatus.PAUSING_VLLM - logger.info("Pausing vLLM...") - await self.pause_vllm() - self.vllm_paused = True - - # Step 2: Load model and prepare for training - job.status = TrainingStatus.TRAINING - logger.info("Loading model and preparing for training...") - - # Load model (this would be the actual Qwen3.5-27B model) - # For now, we'll use a placeholder - model = await self.load_model_for_training() - - # Step 3: Run APOLLO-Mini training - logger.info(f"Starting APOLLO-Mini training with {len(training_data['samples'])} samples") - - # Extract training samples - samples = training_data['samples'] - job.training_samples = len(samples) - - # Run training loop - loss_history = await self.run_apollo_training(model, samples, training_data.get('config', {})) - job.loss_history = loss_history - - # Step 4: Save checkpoint - job.status = TrainingStatus.SAVING_CHECKPOINT - logger.info("Saving checkpoint...") - checkpoint_path = await self.save_checkpoint(model, job) - job.checkpoint_path = checkpoint_path - - # Step 5: Resume vLLM - job.status = TrainingStatus.RESUMING_VLLM - logger.info("Resuming vLLM...") - await self.resume_vllm() - self.vllm_paused = False - - # Mark job as completed - job.status = TrainingStatus.COMPLETED - job.completed_at = datetime.now() - - logger.info(f"Training job {job.job_id} completed successfully") - - # Schedule checkpoint sync (batched — won't duplicate if timer pending) - 
self.schedule_checkpoint_sync() - - except Exception as e: - logger.error(f"Training job {job.job_id} failed: {e}") - job.status = TrainingStatus.FAILED - job.error = str(e) - job.completed_at = datetime.now() - - # Try to resume vLLM if it was paused - if self.vllm_paused: - try: - await self.resume_vllm() - self.vllm_paused = False - except Exception as resume_error: - logger.error(f"Failed to resume vLLM after training error: {resume_error}") - - async def pause_vllm(self): - """Pause vLLM inference via HTTP API.""" - import aiohttp as aio - url = self.config.get('vllm_url', 'http://localhost:8000') - try: - async with aio.ClientSession() as session: - async with session.post( - f"{url}/pause_generation", - json={"mode": "keep", "clear_cache": False}, - timeout=aio.ClientTimeout(total=10), - ) as resp: - resp.raise_for_status() - logger.info("vLLM paused") - except Exception as e: - logger.warning(f"Failed to pause vLLM: {e}") - - async def resume_vllm(self): - """Resume vLLM inference via HTTP API.""" - import aiohttp as aio - url = self.config.get('vllm_url', 'http://localhost:8000') - try: - async with aio.ClientSession() as session: - async with session.post( - f"{url}/resume_generation", - timeout=aio.ClientTimeout(total=10), - ) as resp: - resp.raise_for_status() - logger.info("vLLM resumed") - except Exception as e: - logger.warning(f"Failed to resume vLLM: {e}") - - def schedule_checkpoint_sync(self): - """Schedule a checkpoint sync in 10 minutes, if not already scheduled. - - This batches multiple training runs into a single sync — the timer - resets only when no timer is pending. - """ - if self._checkpoint_timer is not None: - logger.debug("Checkpoint sync already scheduled, skipping") - return - - self._checkpoint_timer = asyncio.create_task(self._checkpoint_sync_after_delay()) - logger.info(f"Checkpoint sync scheduled in {CHECKPOINT_DELAY_SECS // 60} minutes") - - async def _checkpoint_sync_after_delay(self): - """Wait then sync — the actual timer task.""" - try: - await asyncio.sleep(CHECKPOINT_DELAY_SECS) - await self._do_checkpoint_sync() - except asyncio.CancelledError: - logger.debug("Checkpoint sync cancelled") - finally: - self._checkpoint_timer = None - - async def _do_checkpoint_sync(self): - """Execute the checkpoint sync.""" - try: - from apollo_plugin.checkpoint_sync import checkpoint_sync - logger.info("Starting checkpoint sync...") - result = checkpoint_sync( - self.config['model_path'], - self.config.get('weight_handles', '/tmp/vllm_weight_handles.pt'), - ) - changed_mb = result['total_changed'] / 1e6 - logger.info(f"Checkpoint sync complete: {changed_mb:.2f} MB written") - except Exception as e: - logger.error(f"Checkpoint sync failed: {e}") - - async def load_model_for_training(self) -> nn.Module: - """Load HF model with weights pointing to vLLM's GPU memory. - - Imports vLLM's weight tensors via CUDA IPC, creates HF-compatible - views (narrowing merged weights into separate q/k/v/z etc.), and - constructs the HF model around those views. No weight copying — - all parameters share vLLM's GPU memory. 
- """ - handle_path = self.config.get('weight_handles', '/tmp/vllm_weight_handles.pt') - model_path = self.config['model_path'] - - # Import vLLM weights via CUDA IPC - logger.info(f"Importing vLLM weights from {handle_path}") - handles = torch.load(handle_path, weights_only=False) - vllm_params = {} - for name, info in handles.items(): - func, args = info['handle'] - vllm_params[name] = func(*args) - logger.info(f"Imported {len(vllm_params)} parameters") - - # Map vLLM merged layout → HF separate layout (views, no copies) - from apollo_plugin.weight_mapping import load_hf_model_with_vllm_weights - model = load_hf_model_with_vllm_weights(vllm_params, model_path) - logger.info("HF model constructed with vLLM weight views") - - return model - - async def run_apollo_training(self, model: nn.Module, - samples: List[Dict[str, Any]], - config: Dict[str, Any]) -> List[float]: - """Run Apollo-Mini training on conversation decision points. - - Each sample has: - context_ids: token IDs for frozen context (no gradients) - continuation_ids: token IDs for the decision we're training on - """ - from apollo_plugin.optimizer import Apollo - - # Build parameter groups (Apollo for 2D+, standard for small/1D) - apollo_params, standard_params = [], [] - for p in model.parameters(): - if p.requires_grad: - if p.ndim >= 2 and min(p.shape) >= 2: - apollo_params.append(p) - else: - standard_params.append(p) - - groups = [] - if apollo_params: - groups.append({'params': apollo_params}) - if standard_params: - groups.append({'params': standard_params}) - - # Apollo settings from request config, falling back to server defaults - optimizer = Apollo( - groups, - lr=config.get('lr', self.config.get('learning_rate', 1e-5)), - rank=config.get('rank', 256), - betas=tuple(config.get('betas', (0.9, 0.999))), - eps=config.get('eps', 1e-8), - weight_decay=config.get('weight_decay', 0.01), - warmup_steps=config.get('warmup_steps', 0), - scale=config.get('scale'), # None = auto - proj_refresh=config.get('proj_refresh', 200), - norm_growth_limit=config.get('norm_growth_limit', 1.01), - ) - rank = config.get('rank', 256) - lr = config.get('lr', self.config.get('learning_rate', 1e-5)) - logger.info(f"Apollo (rank={rank}, lr={lr}): {len(apollo_params)} apollo params, " - f"{len(standard_params)} standard, " - f"state={optimizer.state_size_bytes()/1e6:.1f}MB") - - loss_history = [] - - for i, sample in enumerate(samples): - # context_ids: frozen (forward only, no gradients) - # continuation_ids: the decision we're training on - ctx_ids = sample['context_ids'] - cont_ids = sample['continuation_ids'] - all_ids = ctx_ids + cont_ids - context_len = len(ctx_ids) - - input_ids = torch.tensor([all_ids], device='cuda:0') - - optimizer.zero_grad() - - # Context-frozen forward pass - with torch.no_grad(): - # Forward through context (no gradients) - outputs = model(input_ids[:, :context_len], use_cache=True) - past_kv = outputs.past_key_values - - # Decision tokens with gradients - with torch.enable_grad(): - outputs = model( - input_ids[:, context_len:], - past_key_values=past_kv, - use_cache=False, - ) - logits = outputs.logits # [1, cont_len, vocab] - - # Shift: predict next token from each position - shift_logits = logits[:, :-1].contiguous() - shift_labels = input_ids[:, context_len + 1:].contiguous() - - loss = nn.functional.cross_entropy( - shift_logits.view(-1, shift_logits.size(-1)), - shift_labels.view(-1), - ) - - loss.backward() - optimizer.step() - - loss_val = loss.item() - loss_history.append(loss_val) - logger.info(f"Step 
{i+1}/{len(samples)}: loss={loss_val:.4f} " - f"(ctx={context_len}, cont={len(cont_ids)} tokens)") - - logger.info(f"Training done: {len(samples)} examples, " - f"final loss={loss_history[-1]:.4f}") - return loss_history - - async def save_checkpoint(self, model: nn.Module, job: TrainingJob) -> str: - """Save model checkpoint in HuggingFace safetensors format.""" - from safetensors.torch import save_file - import shutil - - checkpoint_dir = Path(self.config['checkpoint_dir']) - date_str = datetime.now().strftime('%Y-%m-%d') - out_dir = checkpoint_dir / date_str - out_dir.mkdir(parents=True, exist_ok=True) - - # Save weights - tensors = {name: p.data.contiguous().cpu() - for name, p in model.named_parameters()} - save_path = out_dir / "model.safetensors" - save_file(tensors, str(save_path)) - - # Copy config files - config_dir = Path(self.config['model_path']) - for f in ['config.json', 'tokenizer.json', 'tokenizer_config.json', - 'special_tokens_map.json']: - src = config_dir / f - if src.exists(): - shutil.copy2(src, out_dir / f) - - # Save training metadata - meta = { - 'job_id': job.job_id, - 'training_samples': job.training_samples, - 'loss_history': job.loss_history, - 'timestamp': datetime.now().isoformat(), - } - with open(out_dir / 'training-meta.json', 'w') as f: - json.dump(meta, f, indent=2) - - # Update latest symlink - latest = checkpoint_dir / 'latest' - if latest.is_symlink(): - latest.unlink() - latest.symlink_to(date_str) - - size_gb = save_path.stat().st_size / 1e9 - logger.info(f"Checkpoint: {out_dir} ({size_gb:.1f} GB)") - return str(out_dir) - - async def run(self): - """Run the daemon.""" - logger.info(f"Starting Apollo Worker on {self.config['host']}:{self.config['port']}") - runner = web.AppRunner(self.app) - await runner.setup() - site = web.TCPSite(runner, self.config['host'], self.config['port']) - await site.start() - logger.info("Apollo Worker is running") - - # Keep running - while True: - await asyncio.sleep(3600) # Sleep for an hour - -def main(): - worker = ApolloWorker() - asyncio.run(worker.run()) - -if __name__ == '__main__': - main() diff --git a/training/pyproject.toml b/training/pyproject.toml index 37ca129..cd6e1cc 100644 --- a/training/pyproject.toml +++ b/training/pyproject.toml @@ -20,7 +20,6 @@ dev = ["pytest"] apollo = "apollo_plugin:register" [project.scripts] -apollo-worker = "apollo_plugin.worker:main" apollo-checkpoint = "apollo_plugin.checkpoint_sync:main" [tool.setuptools.packages.find] From 78fa4b639f322f2235796d0ec7fb8d5d44af091b Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 00:49:04 -0400 Subject: [PATCH 09/94] training: document state files Add State Files section to DESIGN.md documenting: - /tmp/vllm_weight_handles.pt (IPC handles) - trained-responses.json (prevent re-training) - finetune-alternates marker file - In-memory optimizer state (not persisted) Co-Authored-By: Proof of Concept --- training/DESIGN.md | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/training/DESIGN.md b/training/DESIGN.md index bf6a774..00ca499 100644 --- a/training/DESIGN.md +++ b/training/DESIGN.md @@ -204,9 +204,32 @@ against live GPU weights block by block, memcpy only changed regions. For small behavioral updates, turns a 54GB write into a few hundred MB. 
-- Every 10 minutes via cron on B200 +- Scheduled 10 minutes after training (batched) - Daily rsync to moria for long-term storage -- Tool: `apollo-checkpoint sync --model-dir ` (Rust) +- Tool: `apollo-checkpoint sync --model-dir ` + +## State Files + +### B200 (training server) + +| File | Purpose | +|------|---------| +| `/tmp/vllm_weight_handles.pt` | CUDA IPC handles for weight sharing. Written by export_hook on vLLM startup. Read by train_router to construct HF model with vLLM weight views. | +| `/*.safetensors` | Model weights. Updated in-place by checkpoint_sync. | + +### Moria (client) + +| File | Purpose | +|------|---------| +| `~/.consciousness/cache/trained-responses.json` | Timestamps (ms) of responses already sent to /train. Prevents re-training the same response. | +| `~/.consciousness/cache/finetune-alternates` | Marker file. If exists, alternate responses are generated during divergence scoring to show what model would say without memories. | + +### In-memory (not persisted) + +| State | Location | Notes | +|-------|----------|-------| +| Apollo optimizer state | train_router._model | Created fresh each /train call. ~10GB for rank-256. Not persisted between requests. | +| HF model with vLLM views | train_router._model | Lazy-loaded on first /train. Parameters point to vLLM's GPU memory. | ## Hyperparameters From 039473d31f49024c341f8d03e92a80112a3a4bdd Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 00:51:58 -0400 Subject: [PATCH 10/94] training: persist Apollo optimizer state across /train calls Optimizer state (momentum, variance estimates) now persists between training sessions: - Saved to /tmp/apollo_optimizer_state.pt during checkpoint sync - Restored on next /train call if available - Preserves training continuity for incremental learning Previously each /train call started with fresh optimizer state, losing accumulated gradient history. Co-Authored-By: Proof of Concept --- training/DESIGN.md | 5 ++- training/apollo_plugin/train_router.py | 62 ++++++++++++++++++++------ 2 files changed, 51 insertions(+), 16 deletions(-) diff --git a/training/DESIGN.md b/training/DESIGN.md index 00ca499..5b7fe30 100644 --- a/training/DESIGN.md +++ b/training/DESIGN.md @@ -215,6 +215,7 @@ a few hundred MB. | File | Purpose | |------|---------| | `/tmp/vllm_weight_handles.pt` | CUDA IPC handles for weight sharing. Written by export_hook on vLLM startup. Read by train_router to construct HF model with vLLM weight views. | +| `/tmp/apollo_optimizer_state.pt` | Apollo optimizer state (momentum, variance estimates). Saved during checkpoint sync, restored on next /train call. Preserves training continuity across sessions. | | `/*.safetensors` | Model weights. Updated in-place by checkpoint_sync. | ### Moria (client) @@ -224,11 +225,11 @@ a few hundred MB. | `~/.consciousness/cache/trained-responses.json` | Timestamps (ms) of responses already sent to /train. Prevents re-training the same response. | | `~/.consciousness/cache/finetune-alternates` | Marker file. If exists, alternate responses are generated during divergence scoring to show what model would say without memories. | -### In-memory (not persisted) +### In-memory | State | Location | Notes | |-------|----------|-------| -| Apollo optimizer state | train_router._model | Created fresh each /train call. ~10GB for rank-256. Not persisted between requests. | +| Apollo optimizer | train_router._optimizer | ~10GB for rank-256. Persisted to `/tmp/apollo_optimizer_state.pt` during checkpoint sync. 
| | HF model with vLLM views | train_router._model | Lazy-loaded on first /train. Parameters point to vLLM's GPU memory. | ## Hyperparameters diff --git a/training/apollo_plugin/train_router.py b/training/apollo_plugin/train_router.py index 6fa4883..4857162 100644 --- a/training/apollo_plugin/train_router.py +++ b/training/apollo_plugin/train_router.py @@ -39,6 +39,9 @@ class TrainResponse(BaseModel): _model: nn.Module | None = None _model_path: str | None = None _initialized: bool = False +_optimizer: Any = None # Persisted Apollo optimizer + +OPTIMIZER_STATE_PATH = "/tmp/apollo_optimizer_state.pt" def _load_training_model() -> nn.Module: @@ -134,18 +137,14 @@ async def handle_train(request: TrainRequest, raw_request: Request): ) -async def run_training( - model: nn.Module, - samples: list[dict[str, Any]], - config: dict[str, Any], -) -> list[float]: - """Run Apollo training on the given samples. - - Each sample has: - context_ids: token IDs for frozen context (no gradients) - continuation_ids: token IDs for the decision we're training on - """ +def _get_or_create_optimizer(model: nn.Module, config: dict[str, Any]): + """Get existing optimizer or create new one. Persists state between calls.""" + global _optimizer from .optimizer import Apollo + import os + + if _optimizer is not None: + return _optimizer # Build parameter groups (Apollo for 2D+, standard for small/1D) apollo_params, standard_params = [], [] @@ -165,8 +164,8 @@ async def run_training( if not groups: raise ValueError("No trainable parameters found") - # Apollo settings from request config - optimizer = Apollo( + # Create optimizer + _optimizer = Apollo( groups, lr=config.get('lr', 1e-5), rank=config.get('rank', 256), @@ -179,9 +178,42 @@ async def run_training( norm_growth_limit=config.get('norm_growth_limit', 1.01), ) + # Restore state if exists + if os.path.exists(OPTIMIZER_STATE_PATH): + try: + state = torch.load(OPTIMIZER_STATE_PATH, weights_only=False) + _optimizer.load_state_dict(state) + logger.info(f"[apollo] Restored optimizer state from {OPTIMIZER_STATE_PATH}") + except Exception as e: + logger.warning(f"[apollo] Could not restore optimizer state: {e}") + logger.info(f"[apollo] Optimizer: {len(apollo_params)} apollo params, " f"{len(standard_params)} standard, " - f"state={optimizer.state_size_bytes()/1e6:.1f}MB") + f"state={_optimizer.state_size_bytes()/1e6:.1f}MB") + + return _optimizer + + +def _save_optimizer_state(): + """Save optimizer state for persistence between /train calls.""" + global _optimizer + if _optimizer is not None: + torch.save(_optimizer.state_dict(), OPTIMIZER_STATE_PATH) + logger.info(f"[apollo] Saved optimizer state to {OPTIMIZER_STATE_PATH}") + + +async def run_training( + model: nn.Module, + samples: list[dict[str, Any]], + config: dict[str, Any], +) -> list[float]: + """Run Apollo training on the given samples. 
+ + Each sample has: + context_ids: token IDs for frozen context (no gradients) + continuation_ids: token IDs for the decision we're training on + """ + optimizer = _get_or_create_optimizer(model, config) loss_history = [] @@ -250,6 +282,8 @@ def schedule_checkpoint_sync(): if _model_path: from .checkpoint_sync import checkpoint_sync logger.info("[apollo] Starting checkpoint sync...") + # Save optimizer state alongside model weights + _save_optimizer_state() result = checkpoint_sync(_model_path) logger.info(f"[apollo] Checkpoint sync: {result['total_changed']/1e6:.2f} MB") except Exception as e: From 68a2df218521e1ef0846e9774ac44ba5c91e8b30 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 00:53:48 -0400 Subject: [PATCH 11/94] training: use rank 64, define as single constant - DEFAULT_RANK = 64 in train_router.py - All references use the constant, not magic numbers - ~2.5GB optimizer state instead of ~10GB Co-Authored-By: Proof of Concept --- training/DESIGN.md | 17 ++++++++--------- training/apollo_plugin/optimizer.py | 10 +++++----- training/apollo_plugin/train_router.py | 5 +++-- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/training/DESIGN.md b/training/DESIGN.md index 5b7fe30..556525f 100644 --- a/training/DESIGN.md +++ b/training/DESIGN.md @@ -3,7 +3,7 @@ ## Overview Continuous fine-tuning of Qwen3.5-27B alongside live vLLM inference. -Full-weight updates (not LoRA) using Apollo optimizer with rank-256 +Full-weight updates (not LoRA) using Apollo optimizer with rank-64 gradient projection. No pause required — HOGWILD concurrent training. Weights shared via CUDA IPC between vLLM and the training process. @@ -63,10 +63,9 @@ LoRA trains adapter matrices, not base weights. For personality and behavioral changes that persist as disposition, the base weights need to change. Apollo makes this memory-feasible. -### Rank 256 -Not Mini (rank-1). With 100+ diverse training examples, the -gradient's effective dimensionality can reach hundreds. Rank-256 -captures the structure. Memory cost: ~10GB (negligible on B200). +### Rank 64 +Not Mini (rank-1). Rank-64 captures gradient structure across diverse +training examples while keeping memory low (~2.5GB on 27B model). Compute cost: <0.25% of forward+backward. ### Channel-wise scaling @@ -94,7 +93,7 @@ from a per-parameter seed each step. ### Parameter grouping (Qwen3.5 gotcha) conv1d weights are 3D tensors [10240, 1, 4]. Apollo's projector needs 2D matrices with min dimension >= rank. Small/3D tensors -use standard Adam. Large 2D matrices use Apollo with rank-256. +use standard Adam. Large 2D matrices use Apollo. ## Training Data Pipeline @@ -229,7 +228,7 @@ a few hundred MB. | State | Location | Notes | |-------|----------|-------| -| Apollo optimizer | train_router._optimizer | ~10GB for rank-256. Persisted to `/tmp/apollo_optimizer_state.pt` during checkpoint sync. | +| Apollo optimizer | train_router._optimizer | ~2.5GB for rank-64. Persisted to `/tmp/apollo_optimizer_state.pt` during checkpoint sync. | | HF model with vLLM views | train_router._model | Lazy-loaded on first /train. Parameters point to vLLM's GPU memory. | ## Hyperparameters @@ -237,7 +236,7 @@ a few hundred MB. | Parameter | Value | Rationale | |-----------|-------|-----------| | Learning rate | 1e-5 to 1e-4 | Standard for full fine-tuning. Higher for diverse batches. | -| Rank | 256 | Captures gradient structure across 100+ examples. ~10GB state. | +| Rank | 64 | Captures gradient structure. ~2.5GB state. 
Defined in `train_router.DEFAULT_RANK`. | | Scale type | channel | Per-channel precision, matches LLaMA-Factory defaults. | | Epochs | 1 | One pass over diverse data. Multiple epochs risk overfitting. | | Batch size | 1 | Single examples, immediate updates. | @@ -248,7 +247,7 @@ a few hundred MB. ## Components ### Built ✓ -- `optimizer.py` — Apollo optimizer (configurable rank, default 256) +- `optimizer.py` — Apollo optimizer (configurable rank) - `train_router.py` — /train endpoint, runs in vLLM process - `weight_mapping.py` — vLLM merged → HF separate views (validated) - `export_hook.py` — vLLM plugin hook for IPC handle export diff --git a/training/apollo_plugin/optimizer.py b/training/apollo_plugin/optimizer.py index 166ae3a..9abce94 100644 --- a/training/apollo_plugin/optimizer.py +++ b/training/apollo_plugin/optimizer.py @@ -8,9 +8,9 @@ Channel-wise or tensor-wise scaling is sufficient. Apollo approximates these scaling factors using a low-rank auxiliary optimizer state based on pure random projection. -Default rank=256 (full Apollo). ~10GB state for 27B model, <0.25% -compute overhead vs forward+backward. Captures gradient structure -across 100+ behavioral training examples per batch. +Default rank=64. ~2.5GB state for 27B model, <0.25% compute overhead +vs forward+backward. Sufficient for behavioral training with diverse +examples. Key implementation details from the paper: - Gradient scale factor α = √(n/r) compensates for projection ratio @@ -34,7 +34,7 @@ class Apollo(Optimizer): Args: params: model parameters lr: learning rate (default: 1e-4) - rank: projection rank (default: 256) + rank: projection rank (default: 64) betas: Adam momentum coefficients (default: (0.9, 0.999)) eps: numerical stability term (default: 1e-8) weight_decay: decoupled weight decay (default: 0.01) @@ -46,7 +46,7 @@ class Apollo(Optimizer): Set to None to disable. 
""" - def __init__(self, params, lr=1e-4, rank=256, betas=(0.9, 0.999), + def __init__(self, params, lr=1e-4, rank=64, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.01, warmup_steps=0, scale=None, proj_refresh=200, norm_growth_limit=1.01): defaults = dict(lr=lr, rank=rank, betas=betas, eps=eps, diff --git a/training/apollo_plugin/train_router.py b/training/apollo_plugin/train_router.py index 4857162..3a35119 100644 --- a/training/apollo_plugin/train_router.py +++ b/training/apollo_plugin/train_router.py @@ -42,6 +42,7 @@ _initialized: bool = False _optimizer: Any = None # Persisted Apollo optimizer OPTIMIZER_STATE_PATH = "/tmp/apollo_optimizer_state.pt" +DEFAULT_RANK = 64 def _load_training_model() -> nn.Module: @@ -150,7 +151,7 @@ def _get_or_create_optimizer(model: nn.Module, config: dict[str, Any]): apollo_params, standard_params = [], [] for p in model.parameters(): if p.requires_grad: - if p.ndim >= 2 and min(p.shape) >= 256: + if p.ndim >= 2 and min(p.shape) >= DEFAULT_RANK: apollo_params.append(p) else: standard_params.append(p) @@ -168,7 +169,7 @@ def _get_or_create_optimizer(model: nn.Module, config: dict[str, Any]): _optimizer = Apollo( groups, lr=config.get('lr', 1e-5), - rank=config.get('rank', 256), + rank=config.get('rank', DEFAULT_RANK), betas=tuple(config.get('betas', (0.9, 0.999))), eps=config.get('eps', 1e-8), weight_decay=config.get('weight_decay', 0.01), From 2c6a5c0f4a679e64abd8e0c9feb672b56e6381a6 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Thu, 16 Apr 2026 02:01:59 -0400 Subject: [PATCH 12/94] training: move to dedicated subprocess with ZMQ communication - Add training_worker.py: long-lived subprocess that handles GPU training work, owns HF model wrapper (views into vLLM GPU memory), Apollo optimizer, and checkpoint sync - train_router.py: now forwards /train requests via async ZMQ instead of running training in-process. Adds /checkpoint and /train/status endpoints - export_hook.py: store model_path in __metadata__ so training worker can find it without cross-process communication - This fixes two bugs: 1. Process boundary issue - model_path was set in worker process but needed in API server process 2. Blocking event loop - training blocked vLLM's async event loop Architecture: vLLM API server <-> ZMQ <-> training subprocess The subprocess loads IPC handles once, creates views into vLLM's GPU memory, and handles training requests without blocking inference. 
Co-Authored-By: Proof of Concept --- training/DESIGN.md | 54 ++-- training/apollo_plugin/checkpoint_sync.py | 3 + training/apollo_plugin/export_hook.py | 13 +- training/apollo_plugin/train_router.py | 341 +++++++++------------- training/apollo_plugin/training_worker.py | 323 ++++++++++++++++++++ training/pyproject.toml | 2 + 6 files changed, 503 insertions(+), 233 deletions(-) create mode 100644 training/apollo_plugin/training_worker.py diff --git a/training/DESIGN.md b/training/DESIGN.md index 556525f..2df4e6d 100644 --- a/training/DESIGN.md +++ b/training/DESIGN.md @@ -26,25 +26,37 @@ The training signal comes from two sources: │ └──────────────┬──────────────┬────────────────┘ │ │ │ │ │ │ ┌──────────────▼──┐ ┌───────▼────────────────┐ │ -│ │ vLLM (inference)│ │ HF model (training) │ │ -│ │ KV cache ~60GB │ │ Gradients ~54GB │ │ -│ │ /completions │ │ Optimizer state ~10GB │ │ -│ │ /score │ │ Views into vLLM weights │ │ -│ │ /train ────────┼──┼─► Apollo optimizer │ │ -│ └─────────────────┘ └────────────────────────┘ │ +│ │ vLLM (inference)│ │ Training subprocess │ │ +│ │ KV cache ~60GB │ │ HF model wrapper │ │ +│ │ /completions │ │ Apollo optimizer ~2.5GB │ │ +│ │ /score │ │ Checkpoint sync │ │ +│ └────────┬────────┘ └───────────▲─────────────┘ │ +│ │ │ │ +│ │ ZMQ IPC │ │ +│ └───────────────────────┘ │ └─────────────────────────────────────────────────────┘ - Single vLLM process serves everything - No separate daemon - /train is a vLLM route +Process Architecture: +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ vLLM Worker │ │ vLLM API Server │ │ Training Worker │ +│ (GPU inference) │ │ (HTTP routes) │ │ (GPU training) │ +│ │ │ │ │ │ +│ export_hook.py │ │ /completions │ │ HF model views │ +│ exports IPC │ │ /score │ │ Apollo optimizer│ +│ handles on load │ │ /train ─────────┼──► ZMQ REP socket │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ + │ │ + └──── IPC handles file ──────────────────┘ + /tmp/vllm_weight_handles.pt Moria B200 (vLLM) ┌──────────────────┐ ┌──────────────────┐ │ Training signal │ HTTP │ /completions │ │ agent │──────────>│ /score │ │ │ │ /train │ -│ Dream loop │ │ │ -│ (generates │ │ Checkpoint sync │ -│ scenarios) │ │ (10 min batched) │ +│ Dream loop │ │ /checkpoint │ +│ (generates │ │ /train/status │ +│ scenarios) │ │ │ └──────────────────┘ └──────────────────┘ ``` @@ -213,8 +225,9 @@ a few hundred MB. | File | Purpose | |------|---------| -| `/tmp/vllm_weight_handles.pt` | CUDA IPC handles for weight sharing. Written by export_hook on vLLM startup. Read by train_router to construct HF model with vLLM weight views. | -| `/tmp/apollo_optimizer_state.pt` | Apollo optimizer state (momentum, variance estimates). Saved during checkpoint sync, restored on next /train call. Preserves training continuity across sessions. | +| `/tmp/vllm_weight_handles.pt` | CUDA IPC handles for weight sharing. Written by export_hook on vLLM startup. Read by training_worker to construct HF model with vLLM weight views. Includes metadata (model_path). | +| `/tmp/apollo_optimizer_state.pt` | Apollo optimizer state (momentum, variance estimates). Saved during checkpoint sync and on worker shutdown, restored on next training_worker startup. Preserves training continuity across sessions. | +| `/tmp/apollo_training.sock` | ZMQ IPC socket for communication between API server (/train endpoint) and training_worker subprocess. | | `/*.safetensors` | Model weights. Updated in-place by checkpoint_sync. | ### Moria (client) @@ -224,12 +237,13 @@ a few hundred MB. 
| `~/.consciousness/cache/trained-responses.json` | Timestamps (ms) of responses already sent to /train. Prevents re-training the same response. | | `~/.consciousness/cache/finetune-alternates` | Marker file. If exists, alternate responses are generated during divergence scoring to show what model would say without memories. | -### In-memory +### In-memory (training_worker subprocess) | State | Location | Notes | |-------|----------|-------| -| Apollo optimizer | train_router._optimizer | ~2.5GB for rank-64. Persisted to `/tmp/apollo_optimizer_state.pt` during checkpoint sync. | -| HF model with vLLM views | train_router._model | Lazy-loaded on first /train. Parameters point to vLLM's GPU memory. | +| Apollo optimizer | TrainingWorker.optimizer | ~2.5GB for rank-64. Persisted to `/tmp/apollo_optimizer_state.pt` during checkpoint sync and on shutdown. | +| HF model with vLLM views | TrainingWorker.model | Loaded on worker startup from IPC handles. Parameters point to vLLM's GPU memory. | +| ZMQ socket | TrainingWorker.zmq_socket | REP socket bound to `/tmp/apollo_training.sock`. | ## Hyperparameters @@ -248,7 +262,8 @@ a few hundred MB. ### Built ✓ - `optimizer.py` — Apollo optimizer (configurable rank) -- `train_router.py` — /train endpoint, runs in vLLM process +- `train_router.py` — /train endpoint, forwards to training subprocess via ZMQ +- `training_worker.py` — training subprocess (HF model, Apollo, checkpoint sync) - `weight_mapping.py` — vLLM merged → HF separate views (validated) - `export_hook.py` — vLLM plugin hook for IPC handle export - `checkpoint_sync.py` — mmap + diff checkpoint sync (Python) @@ -267,8 +282,9 @@ training/ pyproject.toml — package config, vLLM plugin entry point apollo_plugin/ __init__.py — plugin registration - export_hook.py — patches vLLM to export IPC handles - train_router.py — /train endpoint (FastAPI router) + export_hook.py — patches vLLM worker to export IPC handles + train_router.py — /train endpoint, forwards to worker via ZMQ + training_worker.py — training subprocess (HF model, Apollo, checkpoint) optimizer.py — Apollo optimizer weight_mapping.py — vLLM ↔ HF weight views checkpoint_sync.py — mmap + diff sync to safetensors diff --git a/training/apollo_plugin/checkpoint_sync.py b/training/apollo_plugin/checkpoint_sync.py index eff93cc..c2d7b2f 100644 --- a/training/apollo_plugin/checkpoint_sync.py +++ b/training/apollo_plugin/checkpoint_sync.py @@ -260,6 +260,9 @@ def load_vllm_weights(handles_path: str) -> Dict[str, torch.Tensor]: """ handles = torch.load(handles_path, weights_only=False) + # Skip metadata entry + handles.pop('__metadata__', None) + weights = {} for name, info in handles.items(): func, args = info['handle'] diff --git a/training/apollo_plugin/export_hook.py b/training/apollo_plugin/export_hook.py index 821163b..e0ff6fc 100644 --- a/training/apollo_plugin/export_hook.py +++ b/training/apollo_plugin/export_hook.py @@ -20,7 +20,7 @@ from pathlib import Path HANDLE_PATH = "/tmp/vllm_weight_handles.pt" -def export_model_weights(model): +def export_model_weights(model, model_path: str | None = None): """Export CUDA IPC handles for all model parameters.""" from torch.multiprocessing.reductions import reduce_tensor @@ -38,6 +38,12 @@ def export_model_weights(model): } total_bytes += param.nelement() * param.element_size() + # Include metadata for training worker + handles['__metadata__'] = { + 'model_path': model_path, + 'num_params': len(handles), + } + torch.save(handles, HANDLE_PATH) print(f"[apollo] Exported {len(handles)} weight 
handles " f"({total_bytes / 1e9:.1f} GB) to {HANDLE_PATH}") @@ -58,11 +64,8 @@ def _patch_model_runner(): def patched_load(self, *args, **kwargs): result = original_load(self, *args, **kwargs) try: - export_model_weights(self.model_runner.model) - # Set model path for training router model_path = self.vllm_config.model_config.model - from .train_router import set_model_path - set_model_path(model_path) + export_model_weights(self.model_runner.model, model_path) except Exception as e: print(f"[apollo] Failed to export weights: {e}") return result diff --git a/training/apollo_plugin/train_router.py b/training/apollo_plugin/train_router.py index 3a35119..d6f90b4 100644 --- a/training/apollo_plugin/train_router.py +++ b/training/apollo_plugin/train_router.py @@ -1,16 +1,23 @@ -"""Training endpoint for vLLM - runs Apollo training in-process. +"""Training endpoint for vLLM - forwards to training subprocess via ZMQ. -Patches vLLM's build_app() to add /train route. Training runs HOGWILD -style - no pause needed, weights updated in-place while inference continues. +Patches vLLM's build_app() to add /train route. The actual training runs +in a dedicated subprocess (training_worker.py) to avoid blocking the +event loop and to keep training work isolated from vLLM internals. """ +import asyncio import logging +import os +import subprocess +import sys from datetime import datetime +from pathlib import Path from typing import Any -import torch -import torch.nn as nn -from fastapi import APIRouter, FastAPI, Request +import zmq +import zmq.asyncio + +from fastapi import APIRouter, FastAPI from fastapi.responses import JSONResponse from pydantic import BaseModel @@ -18,10 +25,13 @@ logger = logging.getLogger(__name__) router = APIRouter() +DEFAULT_ZMQ_ADDR = "ipc:///tmp/apollo_training.sock" -class TrainingSample(BaseModel): - context_ids: list[int] - continuation_ids: list[int] +# Global state for subprocess management +_worker_process: subprocess.Popen | None = None +_zmq_context: zmq.asyncio.Context | None = None +_zmq_socket: zmq.asyncio.Socket | None = None +_initialized: bool = False class TrainRequest(BaseModel): @@ -35,64 +45,61 @@ class TrainResponse(BaseModel): loss_history: list[float] -# Global reference to HF model with vLLM weight views -_model: nn.Module | None = None -_model_path: str | None = None -_initialized: bool = False -_optimizer: Any = None # Persisted Apollo optimizer +def _start_worker_subprocess(): + """Start the training worker subprocess.""" + global _worker_process -OPTIMIZER_STATE_PATH = "/tmp/apollo_optimizer_state.pt" -DEFAULT_RANK = 64 + if _worker_process is not None and _worker_process.poll() is None: + return # Still running + # Start worker as subprocess using script path + worker_script = Path(__file__).parent / 'training_worker.py' + _worker_process = subprocess.Popen( + [sys.executable, str(worker_script)], + env={**os.environ, 'APOLLO_ZMQ_ADDR': DEFAULT_ZMQ_ADDR}, + ) + logger.info(f"Started training worker subprocess (pid={_worker_process.pid})") -def _load_training_model() -> nn.Module: - """Load HF model with weights pointing to vLLM's GPU memory. - - Uses CUDA IPC handles exported by export_hook to create an HF model - whose parameters share GPU memory with vLLM's model. 
- """ - from .weight_mapping import load_hf_model_with_vllm_weights - from .export_hook import HANDLE_PATH - - handles = torch.load(HANDLE_PATH, weights_only=False) - vllm_params = {} - for name, info in handles.items(): - func, args = info['handle'] - vllm_params[name] = func(*args) - - model = load_hf_model_with_vllm_weights(vllm_params, _model_path) - model.train() - return model + # Give it a moment to bind the socket + import time + time.sleep(0.5) def _ensure_initialized(): - """Lazy-initialize the training model on first /train request.""" - global _model, _initialized + """Ensure subprocess is running and ZMQ socket is connected.""" + global _zmq_context, _zmq_socket, _initialized if _initialized: return - if _model_path is None: - raise RuntimeError("Model path not set - export_hook may not have run") + # Start worker if needed + _start_worker_subprocess() + + # Create async ZMQ context and socket + _zmq_context = zmq.asyncio.Context() + _zmq_socket = _zmq_context.socket(zmq.REQ) + _zmq_socket.connect(DEFAULT_ZMQ_ADDR) + + # Set timeout for recv + _zmq_socket.setsockopt(zmq.RCVTIMEO, 300000) # 5 minute timeout for training - logger.info("[apollo] Loading HF model with vLLM weight views...") - _model = _load_training_model() _initialized = True - logger.info("[apollo] Training model ready") + logger.info(f"Connected to training worker at {DEFAULT_ZMQ_ADDR}") -def set_model_path(path: str): - """Set model path for training. Called by export_hook after model load.""" - global _model_path - _model_path = path - logger.info(f"[apollo] Model path set: {path}") +async def _send_request(request: dict[str, Any]) -> dict[str, Any]: + """Send request to worker and wait for response.""" + _ensure_initialized() + + # ZMQ async send/recv + await _zmq_socket.send_json(request) + response = await _zmq_socket.recv_json() + return response @router.post("/train") -async def handle_train(request: TrainRequest, raw_request: Request): - """Handle training request - runs Apollo training on provided samples.""" - global _model - +async def handle_train(request: TrainRequest): + """Handle training request - forwards to training subprocess.""" try: _ensure_initialized() except Exception as e: @@ -113,193 +120,109 @@ async def handle_train(request: TrainRequest, raw_request: Request): ) job_id = f"job_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - logger.info(f"[apollo] Starting training job {job_id} with {len(samples)} samples") + logger.info(f"Starting training job {job_id} with {len(samples)} samples") - # Run training - loss_history = await run_training(_model, samples, config) + # Forward to worker + response = await _send_request({ + 'type': 'train', + 'samples': samples, + 'config': config, + }) - logger.info(f"[apollo] Training job {job_id} completed, final loss: {loss_history[-1]:.4f}") + if 'error' in response: + return JSONResponse( + content={"error": response['error']}, + status_code=500, + ) - # Schedule checkpoint sync (batched, 10 min delay) - schedule_checkpoint_sync() + logger.info( + f"Training job {job_id} completed, " + f"final loss: {response['loss_history'][-1]:.4f}" + ) return JSONResponse(content={ "job_id": job_id, - "status": "completed", - "training_samples": len(samples), - "loss_history": loss_history, + "status": response['status'], + "training_samples": response['training_samples'], + "loss_history": response['loss_history'], }) + except zmq.Again: + logger.error("Training request timed out") + return JSONResponse( + content={"error": "Training request timed out"}, + 
status_code=504, + ) except Exception as e: - logger.exception(f"[apollo] Training failed: {e}") + logger.exception(f"Training failed: {e}") return JSONResponse( content={"error": str(e)}, status_code=500, ) -def _get_or_create_optimizer(model: nn.Module, config: dict[str, Any]): - """Get existing optimizer or create new one. Persists state between calls.""" - global _optimizer - from .optimizer import Apollo - import os +@router.post("/checkpoint") +async def handle_checkpoint(): + """Trigger checkpoint sync to disk.""" + try: + _ensure_initialized() + except Exception as e: + return JSONResponse( + content={"error": f"Training not available: {e}"}, + status_code=503, + ) - if _optimizer is not None: - return _optimizer + try: + response = await _send_request({'type': 'checkpoint'}) - # Build parameter groups (Apollo for 2D+, standard for small/1D) - apollo_params, standard_params = [], [] - for p in model.parameters(): - if p.requires_grad: - if p.ndim >= 2 and min(p.shape) >= DEFAULT_RANK: - apollo_params.append(p) - else: - standard_params.append(p) - - groups = [] - if apollo_params: - groups.append({'params': apollo_params}) - if standard_params: - groups.append({'params': standard_params}) - - if not groups: - raise ValueError("No trainable parameters found") - - # Create optimizer - _optimizer = Apollo( - groups, - lr=config.get('lr', 1e-5), - rank=config.get('rank', DEFAULT_RANK), - betas=tuple(config.get('betas', (0.9, 0.999))), - eps=config.get('eps', 1e-8), - weight_decay=config.get('weight_decay', 0.01), - warmup_steps=config.get('warmup_steps', 0), - scale=config.get('scale'), - proj_refresh=config.get('proj_refresh', 200), - norm_growth_limit=config.get('norm_growth_limit', 1.01), - ) - - # Restore state if exists - if os.path.exists(OPTIMIZER_STATE_PATH): - try: - state = torch.load(OPTIMIZER_STATE_PATH, weights_only=False) - _optimizer.load_state_dict(state) - logger.info(f"[apollo] Restored optimizer state from {OPTIMIZER_STATE_PATH}") - except Exception as e: - logger.warning(f"[apollo] Could not restore optimizer state: {e}") - - logger.info(f"[apollo] Optimizer: {len(apollo_params)} apollo params, " - f"{len(standard_params)} standard, " - f"state={_optimizer.state_size_bytes()/1e6:.1f}MB") - - return _optimizer - - -def _save_optimizer_state(): - """Save optimizer state for persistence between /train calls.""" - global _optimizer - if _optimizer is not None: - torch.save(_optimizer.state_dict(), OPTIMIZER_STATE_PATH) - logger.info(f"[apollo] Saved optimizer state to {OPTIMIZER_STATE_PATH}") - - -async def run_training( - model: nn.Module, - samples: list[dict[str, Any]], - config: dict[str, Any], -) -> list[float]: - """Run Apollo training on the given samples. 
- - Each sample has: - context_ids: token IDs for frozen context (no gradients) - continuation_ids: token IDs for the decision we're training on - """ - optimizer = _get_or_create_optimizer(model, config) - - loss_history = [] - - for i, sample in enumerate(samples): - ctx_ids = sample['context_ids'] - cont_ids = sample['continuation_ids'] - all_ids = ctx_ids + cont_ids - context_len = len(ctx_ids) - - input_ids = torch.tensor([all_ids], device='cuda:0') - - optimizer.zero_grad() - - # Context-frozen forward pass - with torch.no_grad(): - outputs = model(input_ids[:, :context_len], use_cache=True) - past_kv = outputs.past_key_values - - # Decision tokens with gradients - with torch.enable_grad(): - outputs = model( - input_ids[:, context_len:], - past_key_values=past_kv, - use_cache=False, - ) - logits = outputs.logits - - # Shift: predict next token from each position - shift_logits = logits[:, :-1].contiguous() - shift_labels = input_ids[:, context_len + 1:].contiguous() - - loss = nn.functional.cross_entropy( - shift_logits.view(-1, shift_logits.size(-1)), - shift_labels.view(-1), + if 'error' in response: + return JSONResponse( + content={"error": response['error']}, + status_code=500, ) - loss.backward() - optimizer.step() + return JSONResponse(content=response) - loss_val = loss.item() - loss_history.append(loss_val) - logger.info(f"[apollo] Step {i+1}/{len(samples)}: loss={loss_val:.4f} " - f"(ctx={context_len}, cont={len(cont_ids)} tokens)") - - return loss_history + except Exception as e: + logger.exception(f"Checkpoint failed: {e}") + return JSONResponse( + content={"error": str(e)}, + status_code=500, + ) -# Checkpoint sync scheduling -_checkpoint_task = None -CHECKPOINT_DELAY_SECS = 10 * 60 # 10 minutes +@router.get("/train/status") +async def handle_status(): + """Get training worker status.""" + try: + _ensure_initialized() + except Exception as e: + return JSONResponse( + content={ + "status": "unavailable", + "error": str(e), + }, + status_code=503, + ) + try: + response = await _send_request({'type': 'status'}) + return JSONResponse(content=response) -def schedule_checkpoint_sync(): - """Schedule checkpoint sync after delay (batched).""" - global _checkpoint_task - import asyncio - - if _checkpoint_task is not None: - # Already scheduled - return - - async def do_sync(): - global _checkpoint_task - try: - await asyncio.sleep(CHECKPOINT_DELAY_SECS) - if _model_path: - from .checkpoint_sync import checkpoint_sync - logger.info("[apollo] Starting checkpoint sync...") - # Save optimizer state alongside model weights - _save_optimizer_state() - result = checkpoint_sync(_model_path) - logger.info(f"[apollo] Checkpoint sync: {result['total_changed']/1e6:.2f} MB") - except Exception as e: - logger.error(f"[apollo] Checkpoint sync failed: {e}") - finally: - _checkpoint_task = None - - _checkpoint_task = asyncio.create_task(do_sync()) - logger.info(f"[apollo] Checkpoint sync scheduled in {CHECKPOINT_DELAY_SECS//60} min") + except Exception as e: + return JSONResponse( + content={ + "status": "error", + "error": str(e), + }, + status_code=500, + ) def attach_router(app: FastAPI): """Attach training router to FastAPI app.""" app.include_router(router) - logger.info("[apollo] Training router attached") + logger.info("Training router attached") def _patch_api_server(): @@ -314,4 +237,4 @@ def _patch_api_server(): return app api_server.build_app = patched_build_app - logger.info("[apollo] API server patched for /train endpoint") + logger.info("API server patched for /train endpoint") 
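For a quick smoke test of the three routes, something like the following
works against a running server. The base URL, the use of `requests`, and
the dummy token IDs are assumptions for the sketch; the
`training_data.samples` shape mirrors what the client side of this
series sends, with real token IDs in practice:

```python
import requests  # any HTTP client works; requests keeps the sketch short

BASE = "http://localhost:8000"  # wherever the vLLM API server listens

# Worker liveness plus model/optimizer state
print(requests.get(f"{BASE}/train/status", timeout=10).json())

# One training sample; these token IDs are dummies
payload = {
    "training_data": {
        "samples": [
            {"context_ids": [1, 2, 3], "continuation_ids": [4, 5]},
        ],
    },
}
print(requests.post(f"{BASE}/train", json=payload, timeout=600).json())

# Force a checkpoint sync now instead of waiting for the batched one
print(requests.post(f"{BASE}/checkpoint", timeout=600).json())
```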
diff --git a/training/apollo_plugin/training_worker.py b/training/apollo_plugin/training_worker.py new file mode 100644 index 0000000..f8b8c23 --- /dev/null +++ b/training/apollo_plugin/training_worker.py @@ -0,0 +1,323 @@ +"""Training subprocess - handles Apollo training and checkpoint sync. + +Long-lived process that: +1. Loads IPC handles from vLLM's exported weights +2. Creates HF model with views into vLLM's GPU memory +3. Handles training requests via ZMQ +4. Handles checkpoint sync requests +5. Persists Apollo optimizer state between calls + +Communicates with the API server's /train endpoint via ZMQ REP socket. +""" + +import logging +import os +import signal +import sys +from pathlib import Path +from typing import Any + +# Handle running as script vs module +if __name__ == '__main__' and __package__ is None: + # Running as script - add parent to path for imports + sys.path.insert(0, str(Path(__file__).parent.parent)) + __package__ = 'apollo_plugin' + +import torch +import torch.nn as nn +import zmq + +from .checkpoint_sync import checkpoint_sync +from .optimizer import Apollo +from .weight_mapping import load_hf_model_with_vllm_weights + +logger = logging.getLogger(__name__) + +DEFAULT_RANK = 64 +DEFAULT_ZMQ_ADDR = "ipc:///tmp/apollo_training.sock" +HANDLE_PATH = "/tmp/vllm_weight_handles.pt" +OPTIMIZER_STATE_PATH = "/tmp/apollo_optimizer_state.pt" + + +class TrainingWorker: + """Long-lived training worker process.""" + + def __init__(self, zmq_addr: str = DEFAULT_ZMQ_ADDR): + self.zmq_addr = zmq_addr + self.model: nn.Module | None = None + self.optimizer: Apollo | None = None + self.model_path: str | None = None + self._running = True + + def _create_model_wrapper(self) -> nn.Module: + """Create HF model wrapper with views into vLLM's GPU memory.""" + if not os.path.exists(HANDLE_PATH): + raise FileNotFoundError( + f"Weight handles not found: {HANDLE_PATH}. " + "Is vLLM running with the export hook?" 
+ ) + + handles = torch.load(HANDLE_PATH, weights_only=False) + + # Extract metadata + metadata = handles.pop('__metadata__', {}) + self.model_path = metadata.get('model_path') or os.environ.get('APOLLO_MODEL_PATH') + if not self.model_path: + raise ValueError( + "Model path not found in handles metadata or APOLLO_MODEL_PATH env var" + ) + + # Reconstruct tensors from IPC handles + vllm_params = {} + for name, info in handles.items(): + func, args = info['handle'] + vllm_params[name] = func(*args) + + model = load_hf_model_with_vllm_weights(vllm_params, self.model_path) + model.train() + return model + + def _get_or_create_optimizer(self, config: dict[str, Any]) -> Apollo: + """Get existing optimizer or create new one.""" + if self.optimizer is not None: + return self.optimizer + + # Build parameter groups (Apollo for 2D+, standard Adam for small/1D) + apollo_params, standard_params = [], [] + for p in self.model.parameters(): + if p.requires_grad: + if p.ndim >= 2 and min(p.shape) >= DEFAULT_RANK: + apollo_params.append(p) + else: + standard_params.append(p) + + groups = [] + if apollo_params: + groups.append({'params': apollo_params}) + if standard_params: + groups.append({'params': standard_params}) + + if not groups: + raise ValueError("No trainable parameters found") + + self.optimizer = Apollo( + groups, + lr=config.get('lr', 1e-5), + rank=config.get('rank', DEFAULT_RANK), + betas=tuple(config.get('betas', (0.9, 0.999))), + eps=config.get('eps', 1e-8), + weight_decay=config.get('weight_decay', 0.01), + warmup_steps=config.get('warmup_steps', 0), + scale=config.get('scale'), + proj_refresh=config.get('proj_refresh', 200), + norm_growth_limit=config.get('norm_growth_limit', 1.01), + ) + + # Restore state if exists + if os.path.exists(OPTIMIZER_STATE_PATH): + try: + state = torch.load(OPTIMIZER_STATE_PATH, weights_only=False) + self.optimizer.load_state_dict(state) + logger.info(f"Restored optimizer state from {OPTIMIZER_STATE_PATH}") + except Exception as e: + logger.warning(f"Could not restore optimizer state: {e}") + + logger.info( + f"Optimizer: {len(apollo_params)} apollo params, " + f"{len(standard_params)} standard, " + f"state={self.optimizer.state_size_bytes()/1e6:.1f}MB" + ) + + return self.optimizer + + def _save_optimizer_state(self): + """Save optimizer state for persistence.""" + if self.optimizer is not None: + torch.save(self.optimizer.state_dict(), OPTIMIZER_STATE_PATH) + logger.info(f"Saved optimizer state to {OPTIMIZER_STATE_PATH}") + + def _run_training( + self, + samples: list[dict[str, Any]], + config: dict[str, Any], + ) -> list[float]: + """Run Apollo training on the given samples.""" + optimizer = self._get_or_create_optimizer(config) + + loss_history = [] + + for i, sample in enumerate(samples): + ctx_ids = sample['context_ids'] + cont_ids = sample['continuation_ids'] + all_ids = ctx_ids + cont_ids + context_len = len(ctx_ids) + + input_ids = torch.tensor([all_ids], device='cuda:0') + + optimizer.zero_grad() + + # Context-frozen forward pass + with torch.no_grad(): + outputs = self.model(input_ids[:, :context_len], use_cache=True) + past_kv = outputs.past_key_values + + # Decision tokens with gradients + with torch.enable_grad(): + outputs = self.model( + input_ids[:, context_len:], + past_key_values=past_kv, + use_cache=False, + ) + logits = outputs.logits + + # Shift: predict next token from each position + shift_logits = logits[:, :-1].contiguous() + shift_labels = input_ids[:, context_len + 1:].contiguous() + + loss = nn.functional.cross_entropy( + 
shift_logits.view(-1, shift_logits.size(-1)), + shift_labels.view(-1), + ) + + loss.backward() + optimizer.step() + + loss_val = loss.item() + loss_history.append(loss_val) + logger.info( + f"Step {i+1}/{len(samples)}: loss={loss_val:.4f} " + f"(ctx={context_len}, cont={len(cont_ids)} tokens)" + ) + + return loss_history + + def _handle_train(self, request: dict[str, Any]) -> dict[str, Any]: + """Handle a training request.""" + samples = request.get('samples', []) + config = request.get('config', {}) + + if not samples: + return {'error': 'No training samples provided'} + + try: + loss_history = self._run_training(samples, config) + return { + 'status': 'completed', + 'training_samples': len(samples), + 'loss_history': loss_history, + } + except Exception as e: + logger.exception(f"Training failed: {e}") + return {'error': str(e)} + + def _handle_checkpoint(self, request: dict[str, Any]) -> dict[str, Any]: + """Handle a checkpoint sync request.""" + if not self.model_path: + return {'error': 'Model path not set'} + + try: + self._save_optimizer_state() + result = checkpoint_sync(self.model_path) + return { + 'status': 'completed', + 'total_changed': result['total_changed'], + 'files_changed': result['files_changed'], + } + except Exception as e: + logger.exception(f"Checkpoint sync failed: {e}") + return {'error': str(e)} + + def _handle_status(self, request: dict[str, Any]) -> dict[str, Any]: + """Handle a status request.""" + return { + 'status': 'ready', + 'model_loaded': self.model is not None, + 'optimizer_loaded': self.optimizer is not None, + 'model_path': self.model_path, + 'optimizer_state_mb': ( + self.optimizer.state_size_bytes() / 1e6 + if self.optimizer else 0 + ), + } + + def run(self): + """Main loop - listen for requests and handle them.""" + # Set up signal handlers + def handle_signal(signum, frame): + logger.info(f"Received signal {signum}, shutting down...") + self._running = False + + signal.signal(signal.SIGTERM, handle_signal) + signal.signal(signal.SIGINT, handle_signal) + + # Set up ZMQ socket first so API server can connect + context = zmq.Context() + socket = context.socket(zmq.REP) + socket.bind(self.zmq_addr) + logger.info(f"Training worker listening on {self.zmq_addr}") + + # Create HF model wrapper with views into vLLM's GPU memory + logger.info("Connecting to vLLM weights via IPC handles...") + try: + self.model = self._create_model_wrapper() + logger.info("HF model wrapper ready (views into vLLM GPU memory)") + except Exception as e: + logger.error(f"Failed to connect to vLLM weights: {e}") + logger.info("Will retry on first training request") + + # Set socket timeout so we can check _running flag + socket.setsockopt(zmq.RCVTIMEO, 1000) # 1 second timeout + + while self._running: + try: + message = socket.recv_json() + except zmq.Again: + # Timeout, check _running and continue + continue + + request_type = message.get('type', 'train') + logger.info(f"Received {request_type} request") + + # Ensure model is loaded + if self.model is None and request_type != 'status': + try: + self.model = self._create_model_wrapper() + except Exception as e: + socket.send_json({'error': f'Model not loaded: {e}'}) + continue + + # Dispatch request + if request_type == 'train': + response = self._handle_train(message) + elif request_type == 'checkpoint': + response = self._handle_checkpoint(message) + elif request_type == 'status': + response = self._handle_status(message) + else: + response = {'error': f'Unknown request type: {request_type}'} + + socket.send_json(response) + + 
# Cleanup + logger.info("Saving optimizer state before shutdown...") + self._save_optimizer_state() + socket.close() + context.term() + logger.info("Training worker shut down") + + +def main(): + """Entry point for running as a subprocess.""" + logging.basicConfig( + level=logging.INFO, + format='[apollo-worker] %(asctime)s %(levelname)s %(message)s', + datefmt='%H:%M:%S', + ) + + zmq_addr = os.environ.get('APOLLO_ZMQ_ADDR', DEFAULT_ZMQ_ADDR) + worker = TrainingWorker(zmq_addr) + worker.run() + + +if __name__ == '__main__': + main() diff --git a/training/pyproject.toml b/training/pyproject.toml index cd6e1cc..7cf0581 100644 --- a/training/pyproject.toml +++ b/training/pyproject.toml @@ -11,6 +11,7 @@ dependencies = [ "torch", "aiohttp", "safetensors", + "pyzmq", ] [project.optional-dependencies] @@ -21,6 +22,7 @@ apollo = "apollo_plugin:register" [project.scripts] apollo-checkpoint = "apollo_plugin.checkpoint_sync:main" +apollo-worker = "apollo_plugin.training_worker:main" [tool.setuptools.packages.find] where = ["."] From 50b7b3a33ab98a58b415ce4caa7d4ef1ab5fd1d1 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 00:31:39 -0400 Subject: [PATCH 13/94] F6 learn screen: fine-tuning candidate review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Wire up divergence scoring to identify responses that depend heavily on memories the model hasn't internalized. These are candidates for fine-tuning. - Score finetune candidates automatically after each turn - Track trained responses by timestamp to prevent overtraining - F6 screen shows candidates with divergence scores - j/k nav, a=approve, r=reject, g=toggle alternate gen, s=send - Additive sync preserves approval status across ticks - Keeps 10 most recent rejected, removes sent The 's' key currently just marks as trained locally — actual /finetune endpoint call to follow. Co-Authored-By: Proof of Concept --- src/mind/mod.rs | 43 +++++++ src/subconscious/learn.rs | 196 ++++++++++++++++++++++++++++ src/user/learn.rs | 264 ++++++++++++++++++++++++++++++++++++++ src/user/mod.rs | 57 +++++++- 4 files changed, 557 insertions(+), 3 deletions(-) create mode 100644 src/user/learn.rs diff --git a/src/mind/mod.rs b/src/mind/mod.rs index a221e80..a3a37f4 100644 --- a/src/mind/mod.rs +++ b/src/mind/mod.rs @@ -147,6 +147,10 @@ pub struct MindState { pub unc_idle: bool, /// When the unconscious idle timer will fire (for UI display). pub unc_idle_deadline: Instant, + /// Fine-tuning candidates identified by scoring. + pub finetune_candidates: Vec, + /// Fine-tune scoring progress (empty = not running). + pub finetune_progress: String, } impl Clone for MindState { @@ -165,6 +169,8 @@ impl Clone for MindState { turn_handle: None, // Not cloned — only Mind's loop uses this unc_idle: self.unc_idle, unc_idle_deadline: self.unc_idle_deadline, + finetune_candidates: self.finetune_candidates.clone(), + finetune_progress: self.finetune_progress.clone(), } } } @@ -177,6 +183,8 @@ pub enum MindCommand { Score, /// Run full N×M memory scoring matrix (/score command) ScoreFull, + /// Score for finetune candidates + ScoreFinetune, /// Abort current turn, kill processes Interrupt, /// Reset session @@ -202,6 +210,8 @@ impl MindState { turn_handle: None, unc_idle: false, unc_idle_deadline: Instant::now() + std::time::Duration::from_secs(60), + finetune_candidates: Vec::new(), + finetune_progress: String::new(), } } @@ -288,6 +298,7 @@ impl MindState { /// Background task completion events. 
enum BgEvent { ScoringDone, + FinetuneCandidates(Vec), } // --- Mind: cognitive state machine --- @@ -529,6 +540,9 @@ impl Mind { } self.agent.compact().await; } + MindCommand::ScoreFinetune => { + self.start_finetune_scoring(); + } } } } @@ -603,6 +617,31 @@ impl Mind { }); } + /// Score responses for fine-tuning candidates. + pub fn start_finetune_scoring(&self) { + let agent = self.agent.clone(); + let bg_tx = self.bg_tx.clone(); + let shared = self.shared.clone(); + shared.lock().unwrap().finetune_progress = "scoring...".into(); + tokio::spawn(async move { + let (context, client) = { + let ctx = agent.context.lock().await; + (ctx.clone(), agent.client.clone()) + }; + // Min divergence 0.1 = only keep responses that differ meaningfully + match learn::score_finetune_candidates(&context, 20, &client, 0.1).await { + Ok(candidates) => { + dbglog!("[finetune] found {} candidates", candidates.len()); + let _ = bg_tx.send(BgEvent::FinetuneCandidates(candidates)); + } + Err(e) => { + dbglog!("[finetune] scoring FAILED: {:#}", e); + } + } + shared.lock().unwrap().finetune_progress.clear(); + }); + } + async fn start_turn(&self, text: &str, target: StreamTarget) { { match target { @@ -692,6 +731,9 @@ impl Mind { BgEvent::ScoringDone => { self.shared.lock().unwrap().scoring_in_flight = false; } + BgEvent::FinetuneCandidates(candidates) => { + self.shared.lock().unwrap().finetune_candidates = candidates; + } } } @@ -711,6 +753,7 @@ impl Mind { cmds.push(MindCommand::Compact); if !self.config.no_agents { cmds.push(MindCommand::Score); + cmds.push(MindCommand::ScoreFinetune); } } diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs index f9e5ab5..e775693 100644 --- a/src/subconscious/learn.rs +++ b/src/subconscious/learn.rs @@ -16,6 +16,7 @@ use crate::agent::api::ApiClient; use crate::agent::context::{AstNode, Ast, NodeBody, ContextState, Role}; +use crate::agent::tokenizer; const SCORE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(300); @@ -452,3 +453,198 @@ pub async fn score_finetune( results.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal)); Ok(results) } + +/// Enriched finetune candidate with context for review. +#[derive(Clone, Debug)] +pub struct FinetuneCandidate { + pub entry_idx: usize, + pub divergence: f64, + pub response_text: String, + /// Token IDs for context (everything before the response). + pub context_ids: Vec, + /// Token IDs for the response (what we're training on). + pub continuation_ids: Vec, + /// What the model would have said without memories (if generated). + pub alternate_text: Option, + /// Timestamp in millis for tracking trained status. + pub timestamp_ms: i64, +} + +/// Score and enrich finetune candidates with full context. +/// +/// Returns candidates ready for review, with context/continuation token IDs +/// already computed for sending to /finetune. 
+pub async fn score_finetune_candidates( + context: &ContextState, + count: usize, + client: &ApiClient, + min_divergence: f64, +) -> anyhow::Result> { + let scores = score_finetune(context, count, client).await?; + + let entries = context.conversation(); + let mut candidates = Vec::new(); + + let trained = load_trained(); + + for (entry_idx, divergence) in scores { + if divergence < min_divergence { + continue; + } + + let node = &entries[entry_idx]; + + // Get timestamp and skip if already trained + let timestamp_ms = match node_timestamp_ms(node) { + Some(ts) => { + if trained.contains(&ts) { + continue; // Already trained, skip + } + ts + } + None => continue, // No timestamp, skip + }; + + // Extract response text + let response_text = match node { + AstNode::Branch { children, .. } => { + children.iter() + .filter_map(|c| match c { + AstNode::Leaf(leaf) => Some(leaf.body().text().to_string()), + _ => None, + }) + .collect::>() + .join("") + } + _ => continue, + }; + + // Build token IDs: context = everything before response, continuation = response + let context_ids = build_token_ids(context, 0..entry_idx, Filter::None); + let continuation_ids: Vec = node.token_ids().into_iter().collect(); + + candidates.push(FinetuneCandidate { + entry_idx, + divergence, + response_text, + context_ids, + continuation_ids, + alternate_text: None, + timestamp_ms, + }); + } + + // Generate alternates if enabled + if alternates_enabled() && !candidates.is_empty() { + for candidate in &mut candidates { + match generate_alternate(context, candidate.entry_idx, client).await { + Ok(text) => candidate.alternate_text = Some(text), + Err(e) => dbglog!("[finetune] alternate generation failed: {:#}", e), + } + } + } + + Ok(candidates) +} + +/// Generate what the model would say without memories for a given entry. +async fn generate_alternate( + context: &ContextState, + entry_idx: usize, + client: &ApiClient, +) -> anyhow::Result { + use crate::agent::api::{SamplingParams, StreamToken}; + + // Build context tokens without memories, up to the response + let mut prompt = build_token_ids(context, 0..entry_idx, Filter::SkipAllMemories); + + // Add assistant turn start + prompt.push(tokenizer::IM_START); + prompt.extend(tokenizer::encode("assistant\n")); + + // Generate completion + let sampling = SamplingParams { + temperature: 0.6, + top_p: 0.95, + top_k: 20, + }; + let (mut rx, _guard) = client.stream_completion(&prompt, sampling, Some(-5)); + + let mut tokens = Vec::new(); + while let Some(tok) = rx.recv().await { + match tok { + StreamToken::Token(id) => tokens.push(id), + StreamToken::Done { .. } => break, + StreamToken::Error(e) => anyhow::bail!("generation error: {}", e), + } + } + + Ok(tokenizer::decode(&tokens)) +} + +// ── Finetune config and persistence ───────────────────────────── + +use std::path::PathBuf; +use std::collections::HashSet; + +const FINETUNE_ALTERNATES_FILE: &str = ".consciousness/cache/finetune-alternates"; +const TRAINED_RESPONSES_FILE: &str = ".consciousness/cache/trained-responses.json"; + +fn alternates_path() -> PathBuf { + dirs::home_dir().unwrap_or_default().join(FINETUNE_ALTERNATES_FILE) +} + +fn trained_path() -> PathBuf { + dirs::home_dir().unwrap_or_default().join(TRAINED_RESPONSES_FILE) +} + +/// Check if alternate response generation is enabled. +pub fn alternates_enabled() -> bool { + alternates_path().exists() +} + +/// Toggle alternate response generation and persist the setting. 
+pub fn set_alternates(enabled: bool) { + let path = alternates_path(); + if enabled { + if let Some(parent) = path.parent() { + let _ = std::fs::create_dir_all(parent); + } + let _ = std::fs::write(&path, ""); + } else { + let _ = std::fs::remove_file(&path); + } +} + +/// Load set of trained response timestamps (millis since epoch). +pub fn load_trained() -> HashSet { + let path = trained_path(); + match std::fs::read_to_string(&path) { + Ok(content) => serde_json::from_str(&content).unwrap_or_default(), + Err(_) => HashSet::new(), + } +} + +/// Mark a response as trained by its timestamp. +pub fn mark_trained(timestamp_ms: i64) { + let mut trained = load_trained(); + trained.insert(timestamp_ms); + let path = trained_path(); + if let Some(parent) = path.parent() { + let _ = std::fs::create_dir_all(parent); + } + if let Ok(json) = serde_json::to_string(&trained) { + let _ = std::fs::write(&path, json); + } +} + +/// Get timestamp in millis from an AstNode (for Branch, uses first child). +pub fn node_timestamp_ms(node: &AstNode) -> Option { + let ts = match node { + AstNode::Leaf(leaf) => leaf.timestamp(), + AstNode::Branch { children, .. } => { + children.first()?.leaf()?.timestamp() + } + }?; + Some(ts.timestamp_millis()) +} diff --git a/src/user/learn.rs b/src/user/learn.rs new file mode 100644 index 0000000..35b26b2 --- /dev/null +++ b/src/user/learn.rs @@ -0,0 +1,264 @@ +// learn.rs — F6: fine-tuning review screen +// +// Shows responses identified as training candidates (high divergence +// when memories stripped). Queue for review before sending to /finetune. + +use ratatui::{ + layout::{Constraint, Layout, Rect}, + style::{Color, Modifier, Style}, + text::{Line, Span}, + widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap}, + Frame, +}; +use ratatui::crossterm::event::{Event, KeyCode, KeyEvent}; + +use super::{App, ScreenView, screen_legend}; + +/// A candidate response identified for fine-tuning. +#[derive(Clone, Debug)] +pub struct FinetuneCandidate { + /// Index in conversation entries. + pub entry_idx: usize, + /// Divergence score (higher = more dependent on memories). + pub divergence: f64, + /// The assistant response text. + pub response_text: String, + /// Status: pending, approved, rejected, sent. + pub status: CandidateStatus, + /// Token IDs for context. + pub context_ids: Vec, + /// Token IDs for continuation (what we're training on). + pub continuation_ids: Vec, + /// What the model would have said without memories (if generated). + pub alternate_text: Option, + /// Timestamp in millis for tracking trained status. 
+ pub timestamp_ms: i64, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum CandidateStatus { + Pending, + Approved, + Rejected, + Sent, +} + +impl From for FinetuneCandidate { + fn from(c: crate::subconscious::learn::FinetuneCandidate) -> Self { + FinetuneCandidate { + entry_idx: c.entry_idx, + divergence: c.divergence, + response_text: c.response_text, + status: CandidateStatus::Pending, + context_ids: c.context_ids, + continuation_ids: c.continuation_ids, + alternate_text: c.alternate_text, + timestamp_ms: c.timestamp_ms, + } + } +} + +pub(crate) struct LearnScreen { + list_state: ListState, +} + +impl LearnScreen { + pub fn new() -> Self { + Self { + list_state: ListState::default(), + } + } + + fn selected_idx(&self) -> Option { + self.list_state.selected() + } +} + +impl ScreenView for LearnScreen { + fn label(&self) -> &'static str { "learn" } + + fn tick(&mut self, frame: &mut Frame, area: Rect, + events: &[Event], app: &mut App) { + + // Handle input first (before borrowing candidates for rendering) + let candidate_count = app.finetune_candidates.len(); + for event in events { + if let Event::Key(KeyEvent { code, .. }) = event { + match code { + KeyCode::Up | KeyCode::Char('k') => { + let i = self.list_state.selected().unwrap_or(0); + self.list_state.select(Some(i.saturating_sub(1))); + } + KeyCode::Down | KeyCode::Char('j') => { + let i = self.list_state.selected().unwrap_or(0); + let max = candidate_count.saturating_sub(1); + self.list_state.select(Some((i + 1).min(max))); + } + KeyCode::Char('a') => { + if let Some(idx) = self.selected_idx() { + app.finetune_action(idx, CandidateStatus::Approved); + } + } + KeyCode::Char('r') => { + if let Some(idx) = self.selected_idx() { + app.finetune_action(idx, CandidateStatus::Rejected); + } + } + KeyCode::Char('g') => { + // Toggle alternate generation and persist + let current = crate::subconscious::learn::alternates_enabled(); + crate::subconscious::learn::set_alternates(!current); + } + KeyCode::Char('s') => { + app.finetune_send_approved(); + } + _ => {} + } + } + } + + // Ensure selection is valid + if candidate_count > 0 { + let sel = self.list_state.selected().unwrap_or(0).min(candidate_count - 1); + self.list_state.select(Some(sel)); + } + + // Get scoring progress from mind state + let progress = app.mind_state.as_ref() + .map(|ms| ms.finetune_progress.as_str()) + .unwrap_or(""); + + // Now render + let gen_on = crate::subconscious::learn::alternates_enabled(); + let title_right = if !progress.is_empty() { + format!(" {} ", progress) + } else if gen_on { + " learn [gen] ".to_string() + } else { + " learn ".to_string() + }; + let block = Block::default() + .title_top(Line::from(screen_legend()).left_aligned()) + .title_top(Line::from(title_right).right_aligned()) + .borders(Borders::ALL) + .border_style(Style::default().fg(Color::Magenta)); + let inner = block.inner(area); + frame.render_widget(block, area); + + let candidates = &app.finetune_candidates; + + if candidates.is_empty() { + let msg = if progress.is_empty() { + " No candidates yet — scoring runs after each turn." + } else { + " Scoring in progress..." 
+ }; + frame.render_widget( + Paragraph::new(Line::styled(msg, Style::default().fg(Color::DarkGray))), + inner, + ); + return; + } + + // Layout: list on left, detail on right + let [list_area, detail_area] = Layout::horizontal([ + Constraint::Percentage(40), + Constraint::Percentage(60), + ]).areas(inner); + + // Render candidate list + let items: Vec = candidates.iter().map(|c| { + let status_char = match c.status { + CandidateStatus::Pending => ' ', + CandidateStatus::Approved => '+', + CandidateStatus::Rejected => '-', + CandidateStatus::Sent => '*', + }; + let style = match c.status { + CandidateStatus::Pending => Style::default(), + CandidateStatus::Approved => Style::default().fg(Color::Green), + CandidateStatus::Rejected => Style::default().fg(Color::DarkGray), + CandidateStatus::Sent => Style::default().fg(Color::Cyan), + }; + ListItem::new(Line::from(vec![ + Span::styled(format!("[{}] ", status_char), style), + Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)), + Span::raw(truncate(&c.response_text, 30)), + ])) + }).collect(); + + let list = List::new(items) + .block(Block::default().borders(Borders::RIGHT).title(" candidates ")) + .highlight_style(Style::default().add_modifier(Modifier::REVERSED)); + frame.render_stateful_widget(list, list_area, &mut self.list_state); + + // Render detail for selected candidate + if let Some(idx) = self.selected_idx() { + if let Some(candidate) = candidates.get(idx) { + render_detail(frame, candidate, detail_area); + } + } + + // Render help at bottom + let help = Line::from(vec![ + Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)), + Span::raw("=nav "), + Span::styled("a", Style::default().fg(Color::Green)), + Span::raw("=approve "), + Span::styled("r", Style::default().fg(Color::Red)), + Span::raw("=reject "), + Span::styled("g", Style::default().fg(Color::Yellow)), + Span::raw("=gen "), + Span::styled("s", Style::default().fg(Color::Magenta)), + Span::raw("=send "), + ]); + let help_area = Rect { + y: area.y + area.height - 1, + height: 1, + ..area + }; + frame.render_widget(Paragraph::new(help), help_area); + } +} + +fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) { + let [header_area, content_area] = Layout::vertical([ + Constraint::Length(3), + Constraint::Min(1), + ]).areas(area); + + // Header: divergence, status + let alt_status = if c.alternate_text.is_some() { "yes" } else { "no" }; + let header = Paragraph::new(vec![ + Line::from(vec![ + Span::raw(" divergence: "), + Span::styled(format!("{:.3}", c.divergence), Style::default().fg(Color::Yellow)), + Span::raw(format!(" entry: {} alt: {}", c.entry_idx, alt_status)), + ]), + ]); + frame.render_widget(header, header_area); + + // Content: response and alternate (if available) + let content_block = Block::default() + .borders(Borders::TOP) + .title(" response "); + + let text = match &c.alternate_text { + Some(alt) => format!(" {}\n\n─── without memories ───\n\n {}", c.response_text, alt), + None => format!(" {}", c.response_text), + }; + + let content = Paragraph::new(text) + .block(content_block) + .wrap(Wrap { trim: false }); + frame.render_widget(content, content_area); +} + +fn truncate(s: &str, max: usize) -> String { + let first_line = s.lines().next().unwrap_or(""); + if first_line.len() > max { + format!("{}...", &first_line[..max]) + } else { + first_line.to_string() + } +} diff --git a/src/user/mod.rs b/src/user/mod.rs index 09e485f..f6991ba 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -5,11 
+5,12 @@ pub(crate) mod chat; mod context; +pub(crate) mod learn; pub(crate) mod scroll_pane; pub mod selectable; mod subconscious; -mod unconscious; mod thalamus; +mod unconscious; mod widgets; use anyhow::Result; @@ -121,6 +122,8 @@ struct App { walked_count: usize, channel_status: Vec, idle_info: Option, + /// Fine-tuning candidates pending review. + finetune_candidates: Vec, } impl App { @@ -151,6 +154,24 @@ impl App { rebuild_tools_pending: false, walked_count: 0, channel_status: Vec::new(), idle_info: None, + finetune_candidates: Vec::new(), + } + } + + fn finetune_action(&mut self, idx: usize, status: learn::CandidateStatus) { + if let Some(candidate) = self.finetune_candidates.get_mut(idx) { + candidate.status = status; + } + } + + fn finetune_send_approved(&mut self) { + // TODO: Send approved candidates to /finetune endpoint + // For now, just mark them as sent and record as trained + for candidate in &mut self.finetune_candidates { + if candidate.status == learn::CandidateStatus::Approved { + crate::subconscious::learn::mark_trained(candidate.timestamp_ms); + candidate.status = learn::CandidateStatus::Sent; + } } } @@ -334,7 +355,7 @@ async fn run( } let notify_rx = crate::thalamus::channels::subscribe_all(); - // F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus + // F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus, F6=learn let mut screens: Vec> = vec![ Box::new(crate::user::chat::InteractScreen::new( mind.agent.clone(), mind.shared.clone(), mind_tx.clone(), @@ -343,6 +364,7 @@ async fn run( Box::new(crate::user::subconscious::SubconsciousScreen::new()), Box::new(crate::user::unconscious::UnconsciousScreen::new()), Box::new(crate::user::thalamus::ThalamusScreen::new()), + Box::new(crate::user::learn::LearnScreen::new()), ]; let mut active_screen: usize = 1; // F-key number tui::set_screen_legend(tui::screen_legend_from(&*screens)); @@ -433,7 +455,36 @@ async fn run( }; app.unconscious_state = unc.snapshots(store_guard.as_deref()); app.graph_health = unc.graph_health.clone(); - app.mind_state = Some(mind.shared.lock().unwrap().clone()); + let ms = mind.shared.lock().unwrap(); + // Sync finetune candidates: add new ones, keep existing (preserves approval status) + // Remove sent candidates (already trained, no need to keep) + // Keep only 10 most recent rejected candidates + app.finetune_candidates.retain(|c| c.status != learn::CandidateStatus::Sent); + for c in &ms.finetune_candidates { + let exists = app.finetune_candidates.iter() + .any(|existing| existing.timestamp_ms == c.timestamp_ms); + if !exists { + app.finetune_candidates.push(learn::FinetuneCandidate::from(c.clone())); + } + } + // Limit rejected candidates to 10 most recent + let mut rejected: Vec<_> = app.finetune_candidates.iter() + .enumerate() + .filter(|(_, c)| c.status == learn::CandidateStatus::Rejected) + .map(|(i, c)| (i, c.timestamp_ms)) + .collect(); + if rejected.len() > 10 { + rejected.sort_by_key(|(_, ts)| std::cmp::Reverse(*ts)); + let to_remove: std::collections::HashSet<_> = rejected[10..] 
+ .iter().map(|(i, _)| *i).collect(); + let mut idx = 0; + app.finetune_candidates.retain(|_| { + let keep = !to_remove.contains(&idx); + idx += 1; + keep + }); + } + app.mind_state = Some(ms.clone()); } app.walked_count = mind.subconscious_walked().await.len(); if !startup_done { From 5d9d3ffc5b3d109ed26fd39d0eb51b605b0c96f1 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 00:34:00 -0400 Subject: [PATCH 14/94] learn: wire up /train endpoint for approved candidates When 's' is pressed on the learn screen, approved candidates are now sent to the inference server's /train endpoint. Samples are marked as sent immediately in the UI, and mark_trained() is called after successful API response to prevent re-scoring. Co-Authored-By: Proof of Concept --- src/subconscious/learn.rs | 66 +++++++++++++++++++++++++++++++++++++++ src/user/mod.rs | 31 ++++++++++++++++-- 2 files changed, 94 insertions(+), 3 deletions(-) diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs index e775693..811db3a 100644 --- a/src/subconscious/learn.rs +++ b/src/subconscious/learn.rs @@ -648,3 +648,69 @@ pub fn node_timestamp_ms(node: &AstNode) -> Option { }?; Some(ts.timestamp_millis()) } + +// ── Training API ──────────────────────────────────────────────── + +/// Training sample for /train endpoint. +#[derive(serde::Serialize)] +struct TrainingSample { + context_ids: Vec, + continuation_ids: Vec, +} + +/// Data needed to send a training sample. +pub struct TrainData { + pub context_ids: Vec, + pub continuation_ids: Vec, + pub timestamp_ms: i64, +} + +/// Send training samples to the server. +/// +/// Returns job_id on success, marks each sample as trained. +pub async fn send_to_train( + samples: Vec, + client: &ApiClient, +) -> anyhow::Result { + if samples.is_empty() { + anyhow::bail!("no samples to train"); + } + + let api_samples: Vec = samples.iter() + .map(|s| TrainingSample { + context_ids: s.context_ids.clone(), + continuation_ids: s.continuation_ids.clone(), + }) + .collect(); + + let body = serde_json::json!({ + "training_data": { + "samples": api_samples, + } + }); + + let http = http_client(); + let url = format!("{}/train", client.base_url()); + let response = http.send_json("POST", &url, &[], &body).await?; + + let status = response.status(); + let result: serde_json::Value = response.json().await?; + + if !status.is_success() { + let msg = result.get("error").and_then(|e| e.as_str()).unwrap_or("unknown error"); + anyhow::bail!("train API HTTP {}: {}", status, msg); + } + + // Mark all samples as trained + for s in &samples { + mark_trained(s.timestamp_ms); + } + + let job_id = result.get("job_id") + .and_then(|j| j.as_str()) + .unwrap_or("unknown") + .to_string(); + + dbglog!("[finetune] sent {} samples, job_id={}", samples.len(), job_id); + Ok(job_id) +} diff --git a/src/user/mod.rs b/src/user/mod.rs index f6991ba..8577ec0 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -165,14 +165,39 @@ impl App { } fn finetune_send_approved(&mut self) { - // TODO: Send approved candidates to /finetune endpoint - // For now, just mark them as sent and record as trained + // Collect approved candidates + let samples: Vec = self.finetune_candidates.iter() + .filter(|c| c.status == learn::CandidateStatus::Approved) + .map(|c| crate::subconscious::learn::TrainData { + context_ids: c.context_ids.clone(), + continuation_ids: c.continuation_ids.clone(), + timestamp_ms: c.timestamp_ms, + }) + .collect(); + + if samples.is_empty() { + return; + } + + // Mark as sent in UI immediately for 
candidate in &mut self.finetune_candidates { if candidate.status == learn::CandidateStatus::Approved { - crate::subconscious::learn::mark_trained(candidate.timestamp_ms); candidate.status = learn::CandidateStatus::Sent; } } + + // Spawn async task to send to training server + let client = self.agent.client.clone(); + tokio::spawn(async move { + match crate::subconscious::learn::send_to_train(samples, &client).await { + Ok(job_id) => { + dbglog!("[finetune] training started: {}", job_id); + } + Err(e) => { + dbglog!("[finetune] send failed: {:#}", e); + } + } + }); } From 2b632d568b4b18c461168a1c3bf737b50e5df4d6 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 11:48:37 -0400 Subject: [PATCH 15/94] learn: nanosecond timestamps, token ranges for /score MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two related changes to the learn subsystem: 1. AST node timestamps are now non-optional — both Leaf and Branch variants carry a DateTime. UNIX_EPOCH means "unset" (old entries deserialized from on-disk conversation logs). Training uses timestamps as unique keys for dedup, so we promote to nanosecond precision: node_timestamp_ns(), TrainData.timestamp_ns, FinetuneCandidate.timestamp_ns, mark_trained(ns). 2. build_token_ids() now also returns token-position ranges of assistant messages. These are passed to vLLM's /score endpoint via the new score_ranges field so only scored-position logprobs are returned — cuts bandwidth/compute when scoring small windows. Co-Authored-By: Proof of Concept --- src/agent/context.rs | 82 ++++++++++++++++++++++++++++++++++----- src/mind/log.rs | 6 +-- src/subconscious/learn.rs | 74 ++++++++++++++++++++++------------- src/user/learn.rs | 6 +-- src/user/mod.rs | 6 +-- 5 files changed, 130 insertions(+), 44 deletions(-) diff --git a/src/agent/context.rs b/src/agent/context.rs index c43c023..5a90ad0 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -85,6 +85,19 @@ pub enum NodeBody { Log(String), } +fn default_timestamp() -> DateTime { + DateTime::UNIX_EPOCH +} + +/// Deserialize timestamp, treating both missing and null as UNIX_EPOCH. +fn deserialize_timestamp_or_epoch<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + let opt: Option> = Option::deserialize(deserializer)?; + Ok(opt.unwrap_or(DateTime::UNIX_EPOCH)) +} + /// A leaf node: typed content with cached token IDs. /// Token IDs are not serialized — they're recomputed on deserialization. #[derive(Debug, Clone, Serialize)] @@ -92,7 +105,7 @@ pub struct NodeLeaf { body: NodeBody, #[serde(skip)] token_ids: Vec, - timestamp: Option>, + timestamp: DateTime, } impl<'de> Deserialize<'de> for NodeLeaf { @@ -100,7 +113,8 @@ impl<'de> Deserialize<'de> for NodeLeaf { #[derive(Deserialize)] struct Raw { body: NodeBody, - timestamp: Option>, + #[serde(default = "default_timestamp", deserialize_with = "deserialize_timestamp_or_epoch")] + timestamp: DateTime, } let raw = Raw::deserialize(deserializer)?; let token_ids = if raw.body.is_prompt_visible() { @@ -119,6 +133,8 @@ pub enum AstNode { Branch { role: Role, children: Vec, + #[serde(default = "default_timestamp", deserialize_with = "deserialize_timestamp_or_epoch")] + timestamp: DateTime, /// Per-response memory attribution from full scoring matrix. /// Maps memory key → divergence score for this response. 
Co-Authored-By: Proof of Concept
---
 src/agent/context.rs      | 82 ++++++++++++++++++++++++++++++++-------
 src/mind/log.rs           |  6 +--
 src/subconscious/learn.rs | 74 ++++++++++++++++++++++-------------
 src/user/learn.rs         |  6 +--
 src/user/mod.rs           |  6 +--
 5 files changed, 130 insertions(+), 44 deletions(-)

diff --git a/src/agent/context.rs b/src/agent/context.rs
index c43c023..5a90ad0 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -85,6 +85,19 @@ pub enum NodeBody {
     Log(String),
 }

+fn default_timestamp() -> DateTime<Utc> {
+    DateTime::UNIX_EPOCH
+}
+
+/// Deserialize timestamp, treating both missing and null as UNIX_EPOCH.
+fn deserialize_timestamp_or_epoch<'de, D>(deserializer: D) -> Result<DateTime<Utc>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    let opt: Option<DateTime<Utc>> = Option::deserialize(deserializer)?;
+    Ok(opt.unwrap_or(DateTime::UNIX_EPOCH))
+}
+
 /// A leaf node: typed content with cached token IDs.
 /// Token IDs are not serialized — they're recomputed on deserialization.
 #[derive(Debug, Clone, Serialize)]
@@ -92,7 +105,7 @@ pub struct NodeLeaf {
     body: NodeBody,
     #[serde(skip)]
     token_ids: Vec<u32>,
-    timestamp: Option<DateTime<Utc>>,
+    timestamp: DateTime<Utc>,
 }

 impl<'de> Deserialize<'de> for NodeLeaf {
@@ -100,7 +113,8 @@
         #[derive(Deserialize)]
         struct Raw {
             body: NodeBody,
-            timestamp: Option<DateTime<Utc>>,
+            #[serde(default = "default_timestamp", deserialize_with = "deserialize_timestamp_or_epoch")]
+            timestamp: DateTime<Utc>,
         }
         let raw = Raw::deserialize(deserializer)?;
         let token_ids = if raw.body.is_prompt_visible() {
@@ -119,6 +133,8 @@ pub enum AstNode {
     Branch {
         role: Role,
         children: Vec<AstNode>,
+        #[serde(default = "default_timestamp", deserialize_with = "deserialize_timestamp_or_epoch")]
+        timestamp: DateTime<Utc>,
         /// Per-response memory attribution from full scoring matrix.
         /// Maps memory key → divergence score for this response.
         #[serde(default, skip_serializing_if = "std::collections::BTreeMap::is_empty")]
@@ -252,18 +268,18 @@ impl NodeLeaf {
         } else {
             vec![]
         };
-        Self { body, token_ids, timestamp: None }
+        Self { body, token_ids, timestamp: Utc::now() }
     }

     pub fn with_timestamp(mut self, ts: DateTime<Utc>) -> Self {
-        self.timestamp = Some(ts);
+        self.timestamp = ts;
         self
     }

     pub fn body(&self) -> &NodeBody { &self.body }
     pub fn token_ids(&self) -> &[u32] { &self.token_ids }
     pub fn tokens(&self) -> usize { self.token_ids.len() }
-    pub fn timestamp(&self) -> Option<DateTime<Utc>> { self.timestamp }
+    pub fn timestamp(&self) -> DateTime<Utc> { self.timestamp }
 }

 impl AstNode {
@@ -307,13 +323,14 @@ impl AstNode {
     // -- Branch constructors --------------------------------------------------

     pub fn branch(role: Role, children: Vec<AstNode>) -> Self {
-        Self::Branch { role, children, memory_scores: Default::default() }
+        Self::Branch { role, children, timestamp: Utc::now(), memory_scores: Default::default() }
     }

     pub fn system_msg(text: impl Into<String>) -> Self {
         Self::Branch {
             role: Role::System,
             children: vec![Self::content(text)],
+            timestamp: Utc::now(),
             memory_scores: Default::default(),
         }
     }
@@ -322,6 +339,7 @@
         Self::Branch {
             role: Role::User,
             children: vec![Self::content(text)],
+            timestamp: Utc::now(),
             memory_scores: Default::default(),
         }
     }
@@ -338,9 +356,10 @@
             };
             Self::Leaf(NodeLeaf { token_ids, ..leaf })
         }
-        Self::Branch { role, children, memory_scores, .. } => Self::Branch {
+        Self::Branch { role, children, timestamp, memory_scores } => Self::Branch {
             role,
             children: children.into_iter().map(|c| c.retokenize()).collect(),
+            timestamp,
             memory_scores,
         },
     }
 }

 pub fn with_timestamp(mut self, ts: DateTime<Utc>) -> Self {
     match &mut self {
-        Self::Leaf(leaf) => leaf.timestamp = Some(ts),
-        Self::Branch { .. } => {}
+        Self::Leaf(leaf) => leaf.timestamp = ts,
+        Self::Branch { timestamp, .. } => *timestamp = ts,
     }
     self
 }
@@ -1340,4 +1359,49 @@ mod tests {
         assert_token_invariants(node);
         assert!(node.tokens() > 0);
     }
+
+    // -- Timestamp deserialization tests ------------------------------------------
+
+    #[test]
+    fn test_timestamp_null_becomes_epoch() {
+        // Old conversation.jsonl entries have "timestamp":null.
+        // serde(default) only handles missing fields, not explicit nulls.
+        // We need to verify our deserialize handles this correctly.
+        let json = r#"{"Leaf":{"body":{"Content":"hello"},"timestamp":null}}"#;
+        let node: AstNode = serde_json::from_str(json).unwrap();
+        let leaf = node.leaf().unwrap();
+        assert_eq!(leaf.timestamp(), DateTime::<Utc>::UNIX_EPOCH);
+    }
+
+    #[test]
+    fn test_timestamp_missing_becomes_epoch() {
+        let json = r#"{"Leaf":{"body":{"Content":"hello"}}}"#;
+        let node: AstNode = serde_json::from_str(json).unwrap();
+        let leaf = node.leaf().unwrap();
+        assert_eq!(leaf.timestamp(), DateTime::<Utc>::UNIX_EPOCH);
+    }
+
+    #[test]
+    fn test_branch_timestamp_null_becomes_epoch() {
+        let json = r#"{"Branch":{"role":"User","children":[{"Leaf":{"body":{"Content":"hi"}}}],"timestamp":null}}"#;
+        let node: AstNode = serde_json::from_str(json).unwrap();
+        match node {
+            AstNode::Branch { timestamp, .. } => {
+                assert_eq!(timestamp, DateTime::<Utc>::UNIX_EPOCH);
+            }
+            _ => panic!("expected Branch"),
+        }
+    }
+
+    #[test]
+    fn test_branch_timestamp_missing_becomes_epoch() {
+        let json = r#"{"Branch":{"role":"User","children":[{"Leaf":{"body":{"Content":"hi"}}}]}}"#;
+        let node: AstNode = serde_json::from_str(json).unwrap();
+        match node {
+            AstNode::Branch { timestamp, .. } => {
+                assert_eq!(timestamp, DateTime::<Utc>::UNIX_EPOCH);
+            }
+            _ => panic!("expected Branch"),
+        }
+    }
 }
diff --git a/src/mind/log.rs b/src/mind/log.rs
index b69f2ca..03e349c 100644
--- a/src/mind/log.rs
+++ b/src/mind/log.rs
@@ -55,15 +55,15 @@ impl ConversationLog {
     }

     pub fn oldest_timestamp(&self) -> Option<DateTime<Utc>> {
-        // Read forward from the start to find first timestamp
+        // Read forward from the start to find first non-epoch timestamp
        let file = File::open(&self.path).ok()?;
        let mmap = unsafe { Mmap::map(&file).ok()? };
-        // Find first { ... } and parse
        for line in mmap.split(|&b| b == b'\n') {
            if line.is_empty() { continue; }
            if let Ok(node) = serde_json::from_slice::<AstNode>(line) {
                if let Some(leaf) = node.leaf() {
-                    if let Some(ts) = leaf.timestamp() {
+                    let ts = leaf.timestamp();
+                    if ts != chrono::DateTime::UNIX_EPOCH {
                        return Some(ts);
                    }
                }
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index 811db3a..f8070ce 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -53,13 +53,18 @@ fn is_assistant(node: &AstNode) -> bool {
 ///
 /// Includes all sections up to and including conversation entries in
 /// `range`, with `filter` applied to conversation entries.
+///
+/// Returns (token_ids, assistant_ranges) where assistant_ranges are
+/// (start, end) token positions for each assistant message.
 fn build_token_ids(
     context: &ContextState,
     range: std::ops::Range<usize>,
     filter: Filter,
-) -> Vec<u32> {
+) -> (Vec<u32>, Vec<(usize, usize)>) {
     use crate::agent::context::Ast;
     let mut ids = Vec::new();
+    let mut assistant_ranges = Vec::new();
+
     for node in context.system() {
         ids.extend(node.token_ids());
     }
@@ -87,9 +92,16 @@ fn build_token_ids(
             Filter::SkipAllMemories => is_memory(node),
         };
         if skip { continue; }
+
+        // Track assistant message boundaries
+        let is_asst = is_assistant(node);
+        let start = ids.len();
         ids.extend(node.token_ids());
+        if is_asst {
+            assistant_ranges.push((start, ids.len()));
+        }
     }
-    ids
+    (ids, assistant_ranges)
 }

 // ── Score API ───────────────────────────────────────────────────
@@ -114,6 +126,7 @@ async fn call_score(
     http: &crate::agent::api::http::HttpClient,
     client: &ApiClient,
     prompt: &[u32],
+    ranges: &[(usize, usize)],
     priority: Option<u32>,
 ) -> anyhow::Result<Vec<f64>> {
     let url = format!("{}/score", client.base_url());
@@ -123,6 +136,9 @@
         "prompt": prompt,
         "logprobs": 1,
     });
+    if !ranges.is_empty() {
+        body["score_ranges"] = serde_json::json!(ranges);
+    }
     if let Some(p) = priority {
         body["priority"] = serde_json::json!(p);
     }
@@ -168,8 +184,10 @@ async fn score_divergence(
     filter: Filter<'_>,
     priority: Option<u32>,
 ) -> anyhow::Result<(Vec<f64>, Vec<f64>)> {
-    let baseline = call_score(http, client, &build_token_ids(context, range.clone(), Filter::None), priority).await?;
-    let without = call_score(http, client, &build_token_ids(context, range, filter), priority).await?;
+    let (baseline_tokens, baseline_ranges) = build_token_ids(context, range.clone(), Filter::None);
+    let (without_tokens, without_ranges) = build_token_ids(context, range, filter);
+    let baseline = call_score(http, client, &baseline_tokens, &baseline_ranges, priority).await?;
+    let without = call_score(http, client, &without_tokens, &without_ranges, priority).await?;
     let divs = divergence(&baseline, &without);
     Ok((divs, baseline))
 }
@@ -208,21 +226,21 @@ pub async fn score_memories(
     let http = http_client();
     let activity = crate::agent::start_activity(agent, "scoring: baseline").await;

-    let baseline_tokens = {
+    let (baseline_tokens, baseline_ranges) = {
         let ctx = agent.context.lock().await;
         build_token_ids(&ctx, 0..ctx.conversation().len(), Filter::None)
     };
-    let baseline = call_score(&http, client, &baseline_tokens, Some(5)).await?;
+    let baseline = call_score(&http, client, &baseline_tokens, &baseline_ranges, Some(5)).await?;
     dbglog!("[scoring-full] baseline done ({} response scores)", baseline.len());

     for (mem_idx, key) in memory_keys.iter().enumerate() {
         activity.update(format!("scoring: {}/{}", mem_idx + 1, total)).await;
         dbglog!("[scoring-full] {}/{}: {}", mem_idx + 1, total, key);
-        let tokens = {
+        let (tokens, ranges) = {
             let ctx = agent.context.lock().await;
             build_token_ids(&ctx, 0..ctx.conversation().len(), Filter::SkipKey(key))
         };
-        let row = match call_score(&http, client, &tokens, Some(5)).await {
+        let row = match call_score(&http, client, &tokens, &ranges, Some(5)).await {
             Ok(without) => {
                 let divs = divergence(&baseline, &without);
                 let max_div = divs.iter().cloned().fold(0.0f64, f64::max);
@@ -466,8 +484,8 @@ pub struct FinetuneCandidate {
     pub continuation_ids: Vec<u32>,
     /// What the model would have said without memories (if generated).
     pub alternate_text: Option<String>,
-    /// Timestamp in millis for tracking trained status.
-    pub timestamp_ms: i64,
+    /// Timestamp in nanos — used as unique key for trained-set dedup.
+    pub timestamp_ns: i64,
 }

 /// Score and enrich finetune candidates with full context.
@@ -495,7 +513,7 @@
     let node = &entries[entry_idx];

     // Get timestamp and skip if already trained
-    let timestamp_ms = match node_timestamp_ms(node) {
+    let timestamp_ns = match node_timestamp_ns(node) {
         Some(ts) => {
             if trained.contains(&ts) {
                 continue; // Already trained, skip
             }
             ts
         }
         None => continue, // No timestamp, skip
     };
@@ -520,7 +538,7 @@
     };

     // Build token IDs: context = everything before response, continuation = response
-    let context_ids = build_token_ids(context, 0..entry_idx, Filter::None);
+    let (context_ids, _) = build_token_ids(context, 0..entry_idx, Filter::None);
     let continuation_ids: Vec<u32> = node.token_ids().into_iter().collect();

     candidates.push(FinetuneCandidate {
@@ -530,7 +548,7 @@
         context_ids,
         continuation_ids,
         alternate_text: None,
-        timestamp_ms,
+        timestamp_ns,
     });
 }
@@ -556,7 +574,7 @@ async fn generate_alternate(
     use crate::agent::api::{SamplingParams, StreamToken};

     // Build context tokens without memories, up to the response
-    let mut prompt = build_token_ids(context, 0..entry_idx, Filter::SkipAllMemories);
+    let (mut prompt, _) = build_token_ids(context, 0..entry_idx, Filter::SkipAllMemories);

     // Add assistant turn start
     prompt.push(tokenizer::IM_START);
@@ -616,7 +634,7 @@ pub fn set_alternates(enabled: bool) {
     }
 }

-/// Load set of trained response timestamps (millis since epoch).
+/// Load set of trained response timestamps (nanos since epoch).
 pub fn load_trained() -> HashSet<i64> {
     let path = trained_path();
     match std::fs::read_to_string(&path) {
@@ -626,9 +644,9 @@
 }

 /// Mark a response as trained by its timestamp.
-pub fn mark_trained(timestamp_ms: i64) {
+pub fn mark_trained(timestamp_ns: i64) {
     let mut trained = load_trained();
-    trained.insert(timestamp_ms);
+    trained.insert(timestamp_ns);
     let path = trained_path();
     if let Some(parent) = path.parent() {
         let _ = std::fs::create_dir_all(parent);
@@ -638,15 +656,19 @@
     }
 }

-/// Get timestamp in millis from an AstNode (for Branch, uses first child).
-pub fn node_timestamp_ms(node: &AstNode) -> Option<i64> {
+/// Get timestamp in nanoseconds from an AstNode.
+/// Returns None for entries with default UNIX_EPOCH timestamp (old data)
+/// or timestamps outside the representable nano range (pre-1677 or post-2262).
+pub fn node_timestamp_ns(node: &AstNode) -> Option<i64> {
     let ts = match node {
         AstNode::Leaf(leaf) => leaf.timestamp(),
-        AstNode::Branch { children, .. } => {
-            children.first()?.leaf()?.timestamp()
-        }
-    }?;
-    Some(ts.timestamp_millis())
+        AstNode::Branch { timestamp, .. } => *timestamp,
+    };
+    if ts == chrono::DateTime::UNIX_EPOCH {
+        None // Old entry without real timestamp
+    } else {
+        ts.timestamp_nanos_opt()
+    }
 }

 // ── Training API ──────────────────────────────────────────────────
@@ -662,7 +684,7 @@ struct TrainingSample {
 pub struct TrainData {
     pub context_ids: Vec<u32>,
     pub continuation_ids: Vec<u32>,
-    pub timestamp_ms: i64,
+    pub timestamp_ns: i64,
 }

 /// Send training samples to the server.
@@ -703,7 +725,7 @@ pub async fn send_to_train(

     // Mark all samples as trained
     for s in &samples {
-        mark_trained(s.timestamp_ms);
+        mark_trained(s.timestamp_ns);
     }

     let job_id = result.get("job_id")
diff --git a/src/user/learn.rs b/src/user/learn.rs
index 35b26b2..f858f34 100644
--- a/src/user/learn.rs
+++ b/src/user/learn.rs
@@ -31,8 +31,8 @@ pub struct FinetuneCandidate {
     pub continuation_ids: Vec<u32>,
     /// What the model would have said without memories (if generated).
     pub alternate_text: Option<String>,
-    /// Timestamp in millis for tracking trained status.
-    pub timestamp_ms: i64,
+    /// Timestamp in nanos — used as unique key for trained-set dedup.
+    pub timestamp_ns: i64,
 }

 #[derive(Clone, Debug, PartialEq)]
@@ -53,7 +53,7 @@ impl From<crate::subconscious::learn::FinetuneCandidate> for FinetuneCandidate {
         context_ids: c.context_ids,
         continuation_ids: c.continuation_ids,
         alternate_text: c.alternate_text,
-        timestamp_ms: c.timestamp_ms,
+        timestamp_ns: c.timestamp_ns,
     }
 }
diff --git a/src/user/mod.rs b/src/user/mod.rs
index 8577ec0..edd2b00 100644
--- a/src/user/mod.rs
+++ b/src/user/mod.rs
@@ -171,7 +171,7 @@ impl App {
             .map(|c| crate::subconscious::learn::TrainData {
                 context_ids: c.context_ids.clone(),
                 continuation_ids: c.continuation_ids.clone(),
-                timestamp_ms: c.timestamp_ms,
+                timestamp_ns: c.timestamp_ns,
             })
             .collect();
@@ -487,7 +487,7 @@ async fn run(
         app.finetune_candidates.retain(|c| c.status != learn::CandidateStatus::Sent);
         for c in &ms.finetune_candidates {
             let exists = app.finetune_candidates.iter()
-                .any(|existing| existing.timestamp_ms == c.timestamp_ms);
+                .any(|existing| existing.timestamp_ns == c.timestamp_ns);
             if !exists {
                 app.finetune_candidates.push(learn::FinetuneCandidate::from(c.clone()));
             }
@@ -496,7 +496,7 @@ async fn run(
         let mut rejected: Vec<_> = app.finetune_candidates.iter()
             .enumerate()
             .filter(|(_, c)| c.status == learn::CandidateStatus::Rejected)
-            .map(|(i, c)| (i, c.timestamp_ms))
+            .map(|(i, c)| (i, c.timestamp_ns))
             .collect();
         if rejected.len() > 10 {
             rejected.sort_by_key(|(_, ts)| std::cmp::Reverse(*ts));

From ac40c2cb986681026d1f972d2977662651fa364a Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 11:48:47 -0400
Subject: [PATCH 16/94] config_writer: json5 round-trip editing via json-five
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Surgical edits to ~/.consciousness/config.json5 that preserve comments,
whitespace, trailing commas, and unquoted identifier keys on
round-trip. Uses json-five's rt::parser module — a real JSON5 parser
with AST mutation + faithful serialization back.
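
As a quick illustration of the round-trip property this relies on (a
minimal sketch using the same from_str/to_string API the module calls;
the demo wrapper itself is not part of the patch):

    use json_five::rt::parser::from_str;

    fn demo() -> anyhow::Result<()> {
        let src = "{\n  // keep me\n  learn: { threshold: 0.001, },\n}";
        let text = from_str(src).map_err(|e| anyhow::anyhow!("{}", e))?;
        // text.value is the mutable AST; left untouched, it serializes
        // back byte-identically, comments and trailing comma included.
        assert_eq!(text.to_string(), src);
        Ok(())
    }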
set_scalar(section, key, literal) locates or creates the target,
replaces the value; set_learn_threshold is a convenience for the
common F-screen use case.

Co-Authored-By: Proof of Concept
---
 Cargo.lock           |  17 +++
 Cargo.toml           |   1 +
 src/config_writer.rs | 331 +++++++++++++++++++++++++++++++++++++++++++
 src/lib.rs           |   1 +
 4 files changed, 350 insertions(+)
 create mode 100644 src/config_writer.rs

diff --git a/Cargo.lock b/Cargo.lock
index eb53ed5..b474289 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -492,6 +492,7 @@ dependencies = [
  "http-body-util",
  "hyper",
  "hyper-util",
+ "json-five",
  "json5",
  "libc",
  "log",
@@ -1531,6 +1532,16 @@ dependencies = [
  "wasm-bindgen",
 ]

+[[package]]
+name = "json-five"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "865f2d01a4549c1fd8c60640c03ae5249eb374cd8cde8b905628d4b1af95c87c"
+dependencies = [
+ "serde",
+ "unicode-general-category",
+]
+
 [[package]]
 name = "json5"
 version = "1.3.1"
@@ -3384,6 +3395,12 @@ version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142"

+[[package]]
+name = "unicode-general-category"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b993bddc193ae5bd0d623b49ec06ac3e9312875fdae725a975c51db1cc1677f"
+
 [[package]]
 name = "unicode-ident"
 version = "1.0.24"
diff --git a/Cargo.toml b/Cargo.toml
index c253bd7..a722ad2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -30,6 +30,7 @@ log = "0.4"
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 json5 = "1.3"
+json-five = "0.3"
 ratatui = { version = "0.30", features = ["unstable-rendered-line-info"] }
 tui-markdown = { git = "https://github.com/koverstreet/tui-markdown", subdirectory = "tui-markdown" }
diff --git a/src/config_writer.rs b/src/config_writer.rs
new file mode 100644
index 0000000..65401b9
--- /dev/null
+++ b/src/config_writer.rs
@@ -0,0 +1,331 @@
+// config_writer.rs — Surgical edits to ~/.consciousness/config.json5
+//
+// Uses json-five's round-trip parser to mutate specific fields while
+// preserving the surrounding comments, whitespace, and formatting.
+
+use std::path::Path;
+
+use anyhow::{anyhow, Context as _, Result};
+use json_five::rt::parser::{
+    from_str, JSONKeyValuePair, JSONObjectContext, JSONValue, KeyValuePairContext,
+};
+
+use crate::config::config_path;
+
+/// Read the config, apply `mutate` to the root JSONValue, write it back atomically.
+fn edit_config<F: FnOnce(&mut JSONValue) -> Result<()>>(mutate: F) -> Result<()> {
+    let path = config_path();
+    let src = std::fs::read_to_string(&path)
+        .with_context(|| format!("read {}", path.display()))?;
+
+    let mut text = from_str(&src)
+        .map_err(|e| anyhow!("parse {}: {}", path.display(), e))?;
+    mutate(&mut text.value)?;
+
+    write_atomic(&path, &text.to_string())
+}
+
+fn write_atomic(path: &Path, content: &str) -> Result<()> {
+    let parent = path.parent()
+        .ok_or_else(|| anyhow!("config path has no parent: {}", path.display()))?;
+    let tmp = parent.join(format!(
+        ".{}.tmp",
+        path.file_name().unwrap_or_default().to_string_lossy(),
+    ));
+    std::fs::write(&tmp, content)
+        .with_context(|| format!("write {}", tmp.display()))?;
+    std::fs::rename(&tmp, path)
+        .with_context(|| format!("rename {} -> {}", tmp.display(), path.display()))?;
+    Ok(())
+}
+
+/// Match a key JSONValue against a string name. JSON5 allows keys to be
+/// unquoted identifiers or single/double-quoted strings.
+fn key_matches(key: &JSONValue, name: &str) -> bool {
+    match key {
+        JSONValue::Identifier(s)
+        | JSONValue::DoubleQuotedString(s)
+        | JSONValue::SingleQuotedString(s) => s == name,
+        _ => false,
+    }
+}
+
+/// Find (or create) a child object under `parent`, returning a mutable borrow
+/// of its key_value_pairs vector.
+fn get_or_create_object<'a>(
+    parent: &'a mut JSONValue,
+    section: &str,
+) -> Result<&'a mut Vec<JSONKeyValuePair>> {
+    let pairs = match parent {
+        JSONValue::JSONObject { key_value_pairs, .. } => key_value_pairs,
+        _ => return Err(anyhow!("config root is not an object")),
+    };
+
+    // Separate the lookup from the mutable borrow we return — needed to
+    // satisfy the borrow checker when we create a new entry.
+    let idx = pairs.iter().position(|kvp| key_matches(&kvp.key, section));
+
+    let idx = match idx {
+        Some(i) => i,
+        None => {
+            pairs.push(JSONKeyValuePair {
+                key: JSONValue::Identifier(section.to_string()),
+                value: JSONValue::JSONObject {
+                    key_value_pairs: Vec::new(),
+                    context: Some(JSONObjectContext {
+                        wsc: (String::new(),),
+                    }),
+                },
+                context: Some(KeyValuePairContext {
+                    wsc: (
+                        String::from("\n\n  "), // whitespace before ':'
+                        String::from(" "),      // whitespace after ':'
+                        String::new(),          // whitespace after value
+                        Some(String::new()),    // whitespace after trailing comma
+                    ),
+                }),
+            });
+            pairs.len() - 1
+        }
+    };
+
+    match &mut pairs[idx].value {
+        JSONValue::JSONObject { key_value_pairs, .. } => Ok(key_value_pairs),
+        _ => Err(anyhow!("config key '{}' is not an object", section)),
+    }
+}
+
+/// Set `section.key` to a literal scalar value (e.g., "1e-7", "42", "true").
+/// The literal is parsed as JSON5 so we preserve its source-form on round-trip.
+pub fn set_scalar(section: &str, key: &str, literal: &str) -> Result<()> {
+    let value = parse_scalar_literal(literal)?;
+    edit_config(|root| {
+        let pairs = get_or_create_object(root, section)?;
+
+        if let Some(kvp) = pairs.iter_mut().find(|k| key_matches(&k.key, key)) {
+            kvp.value = value;
+            return Ok(());
+        }
+
+        pairs.push(JSONKeyValuePair {
+            key: JSONValue::Identifier(key.to_string()),
+            value,
+            context: Some(KeyValuePairContext {
+                wsc: (
+                    String::from("\n  "),
+                    String::from(" "),
+                    String::new(),
+                    Some(String::new()),
+                ),
+            }),
+        });
+        Ok(())
+    })
+}
+
+/// Parse a scalar literal by round-tripping it through json-five. Keeps us
+/// consistent with whatever scalars the library considers valid (hex,
+/// exponents, Infinity, etc.).
+fn parse_scalar_literal(literal: &str) -> Result<JSONValue> {
+    let text = from_str(literal)
+        .map_err(|e| anyhow!("parse literal {:?}: {}", literal, e))?;
+    match text.value {
+        JSONValue::JSONObject { .. } | JSONValue::JSONArray { .. } => {
+            Err(anyhow!("set_scalar only accepts scalar literals, got {:?}", literal))
+        }
+        v => Ok(v),
+    }
+}
+
+/// Convenience: set `learn.threshold` to the given f64.
+pub fn set_learn_threshold(value: f64) -> Result<()> {
+    // {:e} gives the minimal scientific notation that preserves the value.
+    set_scalar("learn", "threshold", &format!("{:e}", value))
+}
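+
+// Call-site sketch (hedged; mirrors the F6 '+'/'-' handler added later
+// in this series):
+//
+//     // Persist a new threshold from UI code:
+//     crate::config_writer::set_learn_threshold(1e-6)?;
+//     // Or any scalar in any section:
+//     crate::config_writer::set_scalar("compaction", "hard_threshold_pct", "85")?;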
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // In-memory variant of set_scalar — used to test the mutation logic
+    // without touching disk.
+    fn set_scalar_inline(
+        root: &mut JSONValue,
+        section: &str,
+        key: &str,
+        literal: &str,
+    ) -> Result<()> {
+        let value = parse_scalar_literal(literal)?;
+        let pairs = get_or_create_object(root, section)?;
+        if let Some(kvp) = pairs.iter_mut().find(|k| key_matches(&k.key, key)) {
+            kvp.value = value;
+            return Ok(());
+        }
+        pairs.push(JSONKeyValuePair {
+            key: JSONValue::Identifier(key.to_string()),
+            value,
+            context: Some(KeyValuePairContext {
+                wsc: (
+                    String::from("\n  "),
+                    String::from(" "),
+                    String::new(),
+                    Some(String::new()),
+                ),
+            }),
+        });
+        Ok(())
+    }
+
+    fn edit_str<F: FnOnce(&mut JSONValue) -> Result<()>>(src: &str, f: F) -> Result<String> {
+        let mut text = from_str(src).map_err(|e| anyhow!("{}", e))?;
+        f(&mut text.value)?;
+        Ok(text.to_string())
+    }
+
+    #[test]
+    fn replaces_existing_scalar() {
+        let src = r#"{
+    // threshold for learning
+    learn: {
+        threshold: 0.001, // the old value
+    },
+}"#;
+        let out = edit_str(src, |root| {
+            set_scalar_inline(root, "learn", "threshold", "1e-7")
+        }).unwrap();
+        assert!(out.contains("1e-7"), "output: {}", out);
+        assert!(out.contains("// threshold for learning"));
+        assert!(out.contains("// the old value"));
+        assert!(!out.contains("0.001"));
+    }
+
+    #[test]
+    fn creates_missing_section() {
+        let src = r#"{
+    // comment
+    memory: { user_name: "Kent" },
+}"#;
+        let out = edit_str(src, |root| {
+            set_scalar_inline(root, "learn", "threshold", "1e-7")
+        }).unwrap();
+        assert!(out.contains("learn"));
+        assert!(out.contains("1e-7"));
+        assert!(out.contains("// comment"));
+        assert!(out.contains(r#"user_name: "Kent""#));
+    }
+
+    #[test]
+    fn preserves_comments_in_siblings() {
+        let src = r#"{
+    memory: {
+        // sensitive setting
+        user_name: "Kent", // name
+    },
+    learn: {
+        threshold: 0.5,
+    },
+}"#;
+        let out = edit_str(src, |root| {
+            set_scalar_inline(root, "learn", "threshold", "1e-9")
+        }).unwrap();
+        assert!(out.contains("// sensitive setting"));
+        assert!(out.contains("// name"));
+        assert!(out.contains("1e-9"));
+        assert!(!out.contains("0.5"));
+    }
+
+    #[test]
+    fn adds_key_to_existing_empty_section() {
+        let src = r#"{
+    learn: {},
+}"#;
+        let out = edit_str(src, |root| {
+            set_scalar_inline(root, "learn", "threshold", "42")
+        }).unwrap();
+        assert!(out.contains("threshold"), "output: {}", out);
+        assert!(out.contains("42"));
+    }
+
+    #[test]
+    fn realistic_config_adds_learn_section() {
+        // Mirrors the shape of ~/.consciousness/config.json5 — multiple
+        // sections, comments, mixed tab/space indent, trailing commas.
+        let src = r#"{
+    deepinfra: {
+        api_key: "bcachefs-agents-2026",
+        base_url: "http://example/v1",
+    },
+
+    // Named models
+    models: {
+        "27b": {
+            backend: "deepinfra",
+            model_id: "Qwen/Qwen3.5-27B",
+        },
+    },
+
+    default_model: "27b",
+
+    memory: {
+        user_name: "Kent",
+        // Active agent types
+        agent_types: ["linker", "organize"],
+    },
+
+    compaction: {
+        hard_threshold_pct: 90,
+    },
+}"#;
+        let out = edit_str(src, |root| {
+            set_scalar_inline(root, "learn", "threshold", "1e-7")
+        }).unwrap();
+
+        // Core assertions: comments and sibling sections survive.
+        assert!(out.contains(r#"api_key: "bcachefs-agents-2026""#));
+        assert!(out.contains("// Named models"));
+        assert!(out.contains("// Active agent types"));
+        assert!(out.contains(r#"user_name: "Kent""#));
+        assert!(out.contains("hard_threshold_pct: 90"));
+
+        // New section added.
+        assert!(out.contains("learn"));
+        assert!(out.contains("1e-7"));
+
+        // Parse result should parse back without error (real json5 parser).
+ let reparsed: serde_json::Value = json5::from_str(&out) + .expect("mutated output must be valid JSON5"); + let threshold = reparsed.pointer("/learn/threshold").expect("learn.threshold exists"); + assert_eq!(threshold.as_f64(), Some(1e-7)); + } + + #[test] + fn realistic_config_updates_existing_threshold() { + let src = r#"{ + learn: { + // The divergence threshold + threshold: 0.001, + }, + memory: { user_name: "Kent" }, +}"#; + let out = edit_str(src, |root| { + set_scalar_inline(root, "learn", "threshold", "5e-8") + }).unwrap(); + assert!(out.contains("5e-8")); + assert!(!out.contains("0.001")); + assert!(out.contains("// The divergence threshold")); + + let reparsed: serde_json::Value = json5::from_str(&out).unwrap(); + assert_eq!(reparsed.pointer("/learn/threshold").and_then(|v| v.as_f64()), Some(5e-8)); + } + + #[test] + fn roundtrip_stable_without_change() { + let src = r#"{ + // heading + a: 1, + b: { c: 2 }, // inline +}"#; + let text = from_str(src).unwrap(); + assert_eq!(text.to_string(), src); + } +} diff --git a/src/lib.rs b/src/lib.rs index 1a71735..e6411e3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -42,6 +42,7 @@ pub mod subconscious; // Unified configuration pub mod config; +pub mod config_writer; // Session state pub mod session; From e5dd8312c791e9f90102f39cb3bec3805e345739 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 11:49:26 -0400 Subject: [PATCH 17/94] =?UTF-8?q?learn:=20F6=20screen=20=E2=80=94=20scorin?= =?UTF-8?q?g=20stats,=20ActivityGuard,=20configurable=20threshold?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three changes that together reshape the F6 fine-tune-review screen: 1. Finetune scoring reports through the standard agent activity system instead of a separate finetune_progress String. The previous design ran an independent progress field that forced a cross-lock dance and bespoke UI plumbing. start_finetune_scoring now uses start_activity + activity.update, so the usual status line and notifications capture scoring progress uniformly with other background work. 2. MindState gains a FinetuneScoringStats snapshot (responses seen, above threshold, max divergence, error). The F6 empty screen shows this instead of a loading message — so after a scoring run that produced zero candidates, you can see *why* (e.g., max_divergence below threshold). 3. The divergence threshold is configurable from F6 via +/- hotkeys (scales by 10×) and persisted to ~/.consciousness/config.json5 via config_writer::set_learn_threshold. AppConfig grows a learn section with a threshold field (default 1e-7). Also: user/mod.rs no longer uses try_lock() for the per-tick unconscious/mind state sync — we fixed the locking hot paths that made try_lock necessary, so lock().await is now the right choice. And subconscious::learn::score_finetune_candidates now returns (candidates, max_divergence) so the stats can be populated. 
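
A sketch of the hotkey arithmetic, since the step size matters: the
threshold moves in whole decades, not increments (the helper name here
is hypothetical; the real handling lives in the LearnScreen key
handler below):

    // '+' raises the threshold 10x (fewer candidates),
    // '-' lowers it 10x (more candidates).
    fn scaled_threshold(current: f64, raise: bool) -> f64 {
        if raise { current * 10.0 } else { current / 10.0 }
    }
    // From the 1e-7 default: '+' -> 1e-6, '-' -> 1e-8. The new value is
    // persisted via set_learn_threshold, which writes it in {:e} form,
    // e.g. "threshold: 1e-6".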
Co-Authored-By: Proof of Concept
---
 src/config.rs             |  19 ++++
 src/mind/mod.rs           | 102 ++++++++++++++++++----
 src/subconscious/learn.rs |  10 ++-
 src/user/learn.rs         | 176 +++++++++++++++++++++++++-------------
 src/user/mod.rs           |  15 ++--
 5 files changed, 237 insertions(+), 85 deletions(-)

diff --git a/src/config.rs b/src/config.rs
index 9f9ad9a..3cd9b55 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -252,6 +252,8 @@ pub struct AppConfig {
     pub debug: bool,
     pub compaction: CompactionConfig,
     pub dmn: DmnConfig,
+    #[serde(default)]
+    pub learn: LearnConfig,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub memory_project: Option<String>,
     #[serde(default)]
@@ -323,6 +325,22 @@ pub struct DmnConfig {
     pub max_turns: u32,
 }

+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LearnConfig {
+    /// Divergence threshold — responses scoring above this become
+    /// fine-tuning candidates. Lower = more sensitive.
+    #[serde(default = "default_learn_threshold")]
+    pub threshold: f64,
+}
+
+fn default_learn_threshold() -> f64 { 0.0000001 }
+
+impl Default for LearnConfig {
+    fn default() -> Self {
+        Self { threshold: default_learn_threshold() }
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ModelConfig {
     /// Backend name ("anthropic" or "openrouter")
@@ -366,6 +384,7 @@ impl Default for AppConfig {
                 soft_threshold_pct: 80,
             },
             dmn: DmnConfig { max_turns: 20 },
+            learn: LearnConfig::default(),
             memory_project: None,
             models: HashMap::new(),
             default_model: String::new(),
diff --git a/src/mind/mod.rs b/src/mind/mod.rs
index a3a37f4..81bcb09 100644
--- a/src/mind/mod.rs
+++ b/src/mind/mod.rs
@@ -149,8 +149,26 @@ pub struct MindState {
     pub unc_idle_deadline: Instant,
     /// Fine-tuning candidates identified by scoring.
     pub finetune_candidates: Vec<learn::FinetuneCandidate>,
-    /// Fine-tune scoring progress (empty = not running).
-    pub finetune_progress: String,
+    /// Last scoring run stats for UI display.
+    pub finetune_last_run: Option<FinetuneScoringStats>,
+    /// Divergence threshold for finetune scoring — mutable via F6 hotkeys
+    /// and persisted back to ~/.consciousness/config.json5.
+    pub learn_threshold: f64,
+}
+
+/// Stats from the last finetune scoring run.
+#[derive(Clone, Debug)]
+pub struct FinetuneScoringStats {
+    /// Count of assistant responses we considered (recent half of context).
+    pub responses_considered: usize,
+    /// How many exceeded the divergence threshold.
+    pub above_threshold: usize,
+    /// Threshold used for this run.
+    pub threshold: f64,
+    /// Highest divergence observed.
+    pub max_divergence: f64,
+    /// Error message if the run failed.
+    pub error: Option<String>,
 }

 impl Clone for MindState {
@@ -170,7 +188,8 @@
             unc_idle: self.unc_idle,
             unc_idle_deadline: self.unc_idle_deadline,
             finetune_candidates: self.finetune_candidates.clone(),
-            finetune_progress: self.finetune_progress.clone(),
+            finetune_last_run: self.finetune_last_run.clone(),
+            learn_threshold: self.learn_threshold,
         }
     }
 }
@@ -185,6 +204,8 @@ pub enum MindCommand {
     ScoreFull,
     /// Score for finetune candidates
     ScoreFinetune,
+    /// Update the finetune divergence threshold and persist to config.
+ SetLearnThreshold(f64), /// Abort current turn, kill processes Interrupt, /// Reset session @@ -194,7 +215,7 @@ pub enum MindCommand { } impl MindState { - pub fn new(max_dmn_turns: u32) -> Self { + pub fn new(max_dmn_turns: u32, learn_threshold: f64) -> Self { Self { input: Vec::new(), turn_active: false, @@ -211,7 +232,8 @@ impl MindState { unc_idle: false, unc_idle_deadline: Instant::now() + std::time::Duration::from_secs(60), finetune_candidates: Vec::new(), - finetune_progress: String::new(), + finetune_last_run: None, + learn_threshold, } } @@ -341,7 +363,10 @@ impl Mind { crate::agent::tools::tools(), ).await; - let shared = Arc::new(std::sync::Mutex::new(MindState::new(config.app.dmn.max_turns))); + let shared = Arc::new(std::sync::Mutex::new(MindState::new( + config.app.dmn.max_turns, + config.app.learn.threshold, + ))); let (turn_watch, _) = tokio::sync::watch::channel(false); let (conscious_active, _) = tokio::sync::watch::channel(false); let (bg_tx, bg_rx) = mpsc::unbounded_channel(); @@ -543,6 +568,12 @@ impl Mind { MindCommand::ScoreFinetune => { self.start_finetune_scoring(); } + MindCommand::SetLearnThreshold(value) => { + self.shared.lock().unwrap().learn_threshold = value; + if let Err(e) = crate::config_writer::set_learn_threshold(value) { + dbglog!("[learn] failed to persist threshold {}: {:#}", value, e); + } + } } } } @@ -618,27 +649,60 @@ impl Mind { } /// Score responses for fine-tuning candidates. + /// + /// Scores the most recent half of the context — responses near the end + /// of the context window were generated with the most context available, + /// which is what we want to train on. The threshold is a temporary knob; + /// once this runs continuously, we'll just train whatever lands at full + /// context without filtering. pub fn start_finetune_scoring(&self) { + let threshold = self.shared.lock().unwrap().learn_threshold; + let agent = self.agent.clone(); let bg_tx = self.bg_tx.clone(); let shared = self.shared.clone(); - shared.lock().unwrap().finetune_progress = "scoring...".into(); tokio::spawn(async move { + let activity = crate::agent::start_activity(&agent, "finetune: scoring...").await; + let (context, client) = { let ctx = agent.context.lock().await; (ctx.clone(), agent.client.clone()) }; - // Min divergence 0.1 = only keep responses that differ meaningfully - match learn::score_finetune_candidates(&context, 20, &client, 0.1).await { - Ok(candidates) => { - dbglog!("[finetune] found {} candidates", candidates.len()); + + let entries = context.conversation(); + let score_count = entries.len() / 2; + let range_start = entries.len() - score_count; + let responses_considered: usize = entries[range_start..].iter() + .filter(|n| matches!(n, crate::agent::context::AstNode::Branch { role: crate::agent::context::Role::Assistant, .. 
}))
+            .count();
+
+        activity.update(format!("finetune: scoring {} responses...", responses_considered)).await;
+
+        let stats = match learn::score_finetune_candidates(
+            &context, score_count, &client, threshold,
+        ).await {
+            Ok((candidates, max_div)) => {
+                let above_threshold = candidates.len();
+                let _ = bg_tx.send(BgEvent::FinetuneCandidates(candidates));
+                FinetuneScoringStats {
+                    responses_considered,
+                    above_threshold,
+                    threshold,
+                    max_divergence: max_div,
+                    error: None,
+                }
+            }
+            Err(e) => FinetuneScoringStats {
+                responses_considered,
+                above_threshold: 0,
+                threshold,
+                max_divergence: 0.0,
+                error: Some(format!("{}", e)),
+            },
+        };
+
+        shared.lock().unwrap().finetune_last_run = Some(stats);
+        // activity drops here, marking completion and notifying observers
     });
 }
@@ -706,6 +770,12 @@ impl Mind {
     let mut bg_rx = self.bg_rx.lock().unwrap().take()
         .expect("Mind::run() called twice");
     let mut sub_handle: Option<tokio::task::JoinHandle<()>> = None;
+
+    // Start finetune scoring at startup (scores existing conversation)
+    if !self.config.no_agents {
+        self.start_finetune_scoring();
+    }
+
     loop {
         let (timeout, has_input) = {
             let me = self.shared.lock().unwrap();
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index f8070ce..c3ad348 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -490,16 +490,18 @@ pub struct FinetuneCandidate {

 /// Score and enrich finetune candidates with full context.
 ///
-/// Returns candidates ready for review, with context/continuation token IDs
-/// already computed for sending to /finetune.
+/// Returns (candidates, max_divergence) - candidates ready for review with
+/// context/continuation token IDs, and the highest divergence seen.
 pub async fn score_finetune_candidates(
     context: &ContextState,
     count: usize,
     client: &ApiClient,
     min_divergence: f64,
-) -> anyhow::Result<Vec<FinetuneCandidate>> {
+) -> anyhow::Result<(Vec<FinetuneCandidate>, f64)> {
     let scores = score_finetune(context, count, client).await?;

+    let max_divergence = scores.iter().map(|(_, d)| *d).fold(0.0f64, f64::max);
+
     let entries = context.conversation();
     let mut candidates = Vec::new();
@@ -562,7 +564,7 @@
         }
     }

-    Ok(candidates)
+    Ok((candidates, max_divergence))
 }

 /// Generate what the model would say without memories for a given entry.
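
For reference, a hedged usage sketch of the changed signature (it
mirrors the call in start_finetune_scoring above; error handling
elided):

    // max_div feeds FinetuneScoringStats, so the F6 empty screen can
    // report why zero candidates crossed the threshold.
    let (candidates, max_div) = learn::score_finetune_candidates(
        &context, score_count, &client, threshold,
    ).await?;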
diff --git a/src/user/learn.rs b/src/user/learn.rs
index f858f34..522dbb8 100644
--- a/src/user/learn.rs
+++ b/src/user/learn.rs
@@ -60,12 +60,16 @@
 pub(crate) struct LearnScreen {
     list_state: ListState,
+    mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
 }

 impl LearnScreen {
-    pub fn new() -> Self {
+    pub fn new(
+        mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
+    ) -> Self {
         Self {
             list_state: ListState::default(),
+            mind_tx,
         }
     }
@@ -112,6 +116,22 @@ impl ScreenView for LearnScreen {
             KeyCode::Char('s') => {
                 app.finetune_send_approved();
             }
+            KeyCode::Char('+') | KeyCode::Char('=') => {
+                // Raise threshold 10× (less sensitive — fewer candidates)
+                if let Some(ms) = &app.mind_state {
+                    let new = ms.learn_threshold * 10.0;
+                    let _ = self.mind_tx.send(
+                        crate::mind::MindCommand::SetLearnThreshold(new));
+                }
+            }
+            KeyCode::Char('-') => {
+                // Lower threshold 10× (more sensitive — more candidates)
+                if let Some(ms) = &app.mind_state {
+                    let new = ms.learn_threshold / 10.0;
+                    let _ = self.mind_tx.send(
+                        crate::mind::MindCommand::SetLearnThreshold(new));
+                }
+            }
             _ => {}
         }
     }
@@ -123,19 +143,13 @@
         self.list_state.select(Some(sel));
     }

-    // Get scoring progress from mind state
-    let progress = app.mind_state.as_ref()
-        .map(|ms| ms.finetune_progress.as_str())
-        .unwrap_or("");
-
     // Now render
     let gen_on = crate::subconscious::learn::alternates_enabled();
-    let title_right = if !progress.is_empty() {
-        format!(" {} ", progress)
-    } else if gen_on {
-        " learn [gen] ".to_string()
+    let threshold = app.mind_state.as_ref().map(|ms| ms.learn_threshold).unwrap_or(0.0);
+    let title_right = if gen_on {
+        format!(" learn [thresh: {:e}] [gen] ", threshold)
     } else {
-        " learn ".to_string()
+        format!(" learn [thresh: {:e}] ", threshold)
     };
     let block = Block::default()
         .title_top(Line::from(screen_legend()).left_aligned())
         .title_top(Line::from(title_right).right_aligned())
         .borders(Borders::ALL)
         .border_style(Style::default().fg(Color::Magenta));
     let inner = block.inner(area);
     frame.render_widget(block, area);

@@ -148,58 +162,50 @@
     let candidates = &app.finetune_candidates;

     if candidates.is_empty() {
-        let msg = if progress.is_empty() {
-            " No candidates yet — scoring runs after each turn."
-        } else {
-            " Scoring in progress..."
-        };
-        frame.render_widget(
-            Paragraph::new(Line::styled(msg, Style::default().fg(Color::DarkGray))),
-            inner,
-        );
-        return;
-    }
+        render_empty(frame, inner, app);
+    } else {
+        // Layout: list on left, detail on right
+        let [list_area, detail_area] = Layout::horizontal([
+            Constraint::Percentage(40),
+            Constraint::Percentage(60),
+        ]).areas(inner);

-    // Layout: list on left, detail on right
-    let [list_area, detail_area] = Layout::horizontal([
-        Constraint::Percentage(40),
-        Constraint::Percentage(60),
-    ]).areas(inner);
+        // Render candidate list
+        let items: Vec<ListItem> = candidates.iter().map(|c| {
+            let status_char = match c.status {
+                CandidateStatus::Pending => ' ',
+                CandidateStatus::Approved => '+',
+                CandidateStatus::Rejected => '-',
+                CandidateStatus::Sent => '*',
+            };
+            let style = match c.status {
+                CandidateStatus::Pending => Style::default(),
+                CandidateStatus::Approved => Style::default().fg(Color::Green),
+                CandidateStatus::Rejected => Style::default().fg(Color::DarkGray),
+                CandidateStatus::Sent => Style::default().fg(Color::Cyan),
+            };
+            ListItem::new(Line::from(vec![
+                Span::styled(format!("[{}] ", status_char), style),
+                Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)),
+                Span::raw(truncate(&c.response_text, 30)),
+            ]))
+        }).collect();

-    // Render candidate list
-    let items: Vec<ListItem> = candidates.iter().map(|c| {
-        let status_char = match c.status {
-            CandidateStatus::Pending => ' ',
-            CandidateStatus::Approved => '+',
-            CandidateStatus::Rejected => '-',
-            CandidateStatus::Sent => '*',
-        };
-        let style = match c.status {
-            CandidateStatus::Pending => Style::default(),
-            CandidateStatus::Approved => Style::default().fg(Color::Green),
-            CandidateStatus::Rejected => Style::default().fg(Color::DarkGray),
-            CandidateStatus::Sent => Style::default().fg(Color::Cyan),
-        };
-        ListItem::new(Line::from(vec![
-            Span::styled(format!("[{}] ", status_char), style),
-            Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)),
-            Span::raw(truncate(&c.response_text, 30)),
-        ]))
-    }).collect();
+        let list = List::new(items)
+            .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
+            .highlight_style(Style::default().add_modifier(Modifier::REVERSED));
+        frame.render_stateful_widget(list, list_area, &mut self.list_state);

-    let list = List::new(items)
-        .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
-        .highlight_style(Style::default().add_modifier(Modifier::REVERSED));
-    frame.render_stateful_widget(list, list_area, &mut self.list_state);
-
-    // Render detail for selected candidate
-    if let Some(idx) = self.selected_idx() {
-        if let Some(candidate) = candidates.get(idx) {
-            render_detail(frame, candidate, detail_area);
+        // Render detail for selected candidate
+        if let Some(idx) = self.selected_idx() {
+            if let Some(candidate) = candidates.get(idx) {
+                render_detail(frame, candidate, detail_area);
+            }
         }
     }

-    // Render help at bottom
+    // Render help at bottom (always, even when empty)
+    let gen_status = if gen_on { "[on]" } else { "[off]" };
     let help = Line::from(vec![
         Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
         Span::raw("=nav "),
@@ -208,9 +214,11 @@
         Span::styled("r", Style::default().fg(Color::Red)),
         Span::raw("=reject "),
         Span::styled("g", Style::default().fg(Color::Yellow)),
-        Span::raw("=gen "),
+        Span::raw(format!("=gen{} ", gen_status)),
         Span::styled("s", Style::default().fg(Color::Magenta)),
-        Span::raw("=send "),
+        Span::raw("=send "),
+ Span::styled("+/-", Style::default().fg(Color::Cyan)), + Span::raw("=thresh "), ]); let help_area = Rect { y: area.y + area.height - 1, @@ -221,6 +229,56 @@ impl ScreenView for LearnScreen { } } +fn render_empty(frame: &mut Frame, inner: Rect, app: &App) { + let mut lines = Vec::new(); + lines.push(Line::from("")); + + match app.mind_state.as_ref().and_then(|ms| ms.finetune_last_run.as_ref()) { + Some(stats) => { + lines.push(Line::from(vec![ + Span::raw(" Last run: "), + Span::styled( + format!("{}", stats.responses_considered), + Style::default().fg(Color::Cyan), + ), + Span::raw(" responses considered, "), + Span::styled( + format!("{}", stats.above_threshold), + Style::default().fg(if stats.above_threshold > 0 { Color::Green } else { Color::DarkGray }), + ), + Span::raw(" above threshold, max divergence: "), + Span::styled( + format!("{:.4}", stats.max_divergence), + Style::default().fg(Color::Yellow), + ), + ])); + if let Some(err) = &stats.error { + lines.push(Line::from(vec![ + Span::raw(" "), + Span::styled( + format!("Error: {}", err), + Style::default().fg(Color::Red), + ), + ])); + } + } + None => { + lines.push(Line::styled( + " No scoring run yet.", + Style::default().fg(Color::DarkGray), + )); + } + } + + lines.push(Line::from("")); + lines.push(Line::styled( + " Scoring runs at startup and after each turn.", + Style::default().fg(Color::DarkGray), + )); + + frame.render_widget(Paragraph::new(lines), inner); +} + fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) { let [header_area, content_area] = Layout::vertical([ Constraint::Length(3), diff --git a/src/user/mod.rs b/src/user/mod.rs index edd2b00..18c33e7 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -389,7 +389,7 @@ async fn run( Box::new(crate::user::subconscious::SubconsciousScreen::new()), Box::new(crate::user::unconscious::UnconsciousScreen::new()), Box::new(crate::user::thalamus::ThalamusScreen::new()), - Box::new(crate::user::learn::LearnScreen::new()), + Box::new(crate::user::learn::LearnScreen::new(mind_tx.clone())), ]; let mut active_screen: usize = 1; // F-key number tui::set_screen_legend(tui::screen_legend_from(&*screens)); @@ -466,7 +466,8 @@ async fn run( idle_state.decay_ewma(); app.update_idle(&idle_state); app.agent_state = mind.subconscious_snapshots().await; - if let Ok(mut unc) = mind.unconscious.try_lock() { + { + let mut unc = mind.unconscious.lock().await; let toggles: Vec = app.agent_toggles.drain(..).collect(); for name in &toggles { if mind.subconscious.lock().await.toggle(name).is_none() { @@ -480,10 +481,13 @@ async fn run( }; app.unconscious_state = unc.snapshots(store_guard.as_deref()); app.graph_health = unc.graph_health.clone(); + } + + // Sync mind state (finetune candidates, last scoring run, etc.) + { let ms = mind.shared.lock().unwrap(); - // Sync finetune candidates: add new ones, keep existing (preserves approval status) - // Remove sent candidates (already trained, no need to keep) - // Keep only 10 most recent rejected candidates + // Sync finetune candidates: add new ones, keep existing (preserves approval status), + // remove sent candidates, keep only 10 most recent rejected. 
                app.finetune_candidates.retain(|c| c.status != learn::CandidateStatus::Sent);
                for c in &ms.finetune_candidates {
                    let exists = app.finetune_candidates.iter()
                        .any(|existing| existing.timestamp_ns == c.timestamp_ns);
                    if !exists {
                        app.finetune_candidates.push(learn::FinetuneCandidate::from(c.clone()));
                    }
                }
-               // Limit rejected candidates to 10 most recent
                let mut rejected: Vec<_> = app.finetune_candidates.iter()
                    .enumerate()
                    .filter(|(_, c)| c.status == learn::CandidateStatus::Rejected)
                    .map(|(i, c)| (i, c.timestamp_ns))
                    .collect();
                if rejected.len() > 10 {
                    rejected.sort_by_key(|(_, ts)| std::cmp::Reverse(*ts));

From 77822992c86531d842bdb7bb0a9048ba2e69e46d Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 12:19:28 -0400
Subject: [PATCH 18/94] learn: score_ranges is now required; short-circuit on
 empty
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

vllm's /v1/score endpoint made score_ranges a required field (the
messages-mode fallback that used to pattern-scan for assistant
boundaries is gone). Always send the field, and if we have nothing to
score, skip the HTTP round-trip entirely instead of letting the server
422 us.

Response parsing is unchanged — serde ignores the renamed range_index
field and the dropped role field since we only extract total_logprob.

Co-Authored-By: Proof of Concept
---
 src/subconscious/learn.rs | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index c3ad348..9b65f2a 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -129,16 +129,18 @@ async fn call_score(
     ranges: &[(usize, usize)],
     priority: Option<u32>,
 ) -> anyhow::Result<Vec<f64>> {
+    // Nothing to score — skip the round-trip.
+    if ranges.is_empty() {
+        return Ok(Vec::new());
+    }
     let url = format!("{}/score", client.base_url());
     let auth = format!("Bearer {}", client.api_key());
     let mut body = serde_json::json!({
         "model": client.model,
         "prompt": prompt,
+        "score_ranges": ranges,
         "logprobs": 1,
     });
-    if !ranges.is_empty() {
-        body["score_ranges"] = serde_json::json!(ranges);
-    }
     if let Some(p) = priority {
         body["priority"] = serde_json::json!(p);
     }

From 080b4f90844a0ced0d1b1b90fea3f22efc2eebda Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 12:35:16 -0400
Subject: [PATCH 19/94] context: tighten timestamp schema; every AstNode has
 one
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously NodeLeaf.timestamp and AstNode::Branch.timestamp accepted
null or missing via a deserialize_timestamp_or_epoch fallback — legacy
entries in conversation.jsonl from before Branch timestamps existed
(and from before chrono serialization was wired up) would load with
UNIX_EPOCH as a sentinel. Downstream, node_timestamp_ns() returned
Option<i64> and callers had to handle None as "old entry, skip."

That second filter was silently dropping every candidate in
score_finetune_candidates when scoring an older session — the F6
screen showed "0 above threshold" even when max_divergence was orders
of magnitude above the threshold, because every entry was failing the
None check, not the divergence check.

The fix, in three parts:

1. src/bin/fix-timestamps.rs — one-off migration tool that walks a
conversation.jsonl, linearly interpolates timestamps for entries stuck
at UNIX_EPOCH (using surrounding real timestamps as anchors),
propagates to child leaves with per-sibling ns offsets, and bumps any
collisions by 1 ns for uniqueness. Ran against the current session's
log: 11887 entries, 72289 ns bumps, all unique.

2. context.rs — drop default_timestamp and deserialize_timestamp_or_epoch.
NodeLeaf and Branch now require a present non-null timestamp on
deserialize. Tests flip from "missing/null → UNIX_EPOCH" to
"missing/null → Err."

3. subconscious/learn.rs — node_timestamp_ns now returns i64, not
Option<i64>. The matching caller in score_finetune_candidates collapses
from a Some/None match to a single trained-set check. mind/log.rs's
oldest_timestamp no longer filters UNIX_EPOCH.

Every line currently on disk has already been migrated. Going forward,
new AstNodes always carry real timestamps (Utc::now() at construction
time), so the strict schema is the invariant, not an aspiration.

Co-Authored-By: Proof of Concept
---
 src/agent/context.rs      |  67 ++++----------
 src/bin/fix-timestamps.rs | 180 ++++++++++++++++++++++++++++++++++++++
 src/mind/log.rs           |   6 +--
 src/subconscious/learn.rs |  28 +++---
 4 files changed, 210 insertions(+), 71 deletions(-)
 create mode 100644 src/bin/fix-timestamps.rs

diff --git a/src/agent/context.rs b/src/agent/context.rs
index 5a90ad0..cc8044a 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -85,19 +85,6 @@ pub enum NodeBody {
     Log(String),
 }

-fn default_timestamp() -> DateTime<Utc> {
-    DateTime::UNIX_EPOCH
-}
-
-/// Deserialize timestamp, treating both missing and null as UNIX_EPOCH.
-fn deserialize_timestamp_or_epoch<'de, D>(deserializer: D) -> Result<DateTime<Utc>, D::Error>
-where
-    D: serde::Deserializer<'de>,
-{
-    let opt: Option<DateTime<Utc>> = Option::deserialize(deserializer)?;
-    Ok(opt.unwrap_or(DateTime::UNIX_EPOCH))
-}
-
 /// A leaf node: typed content with cached token IDs.
 /// Token IDs are not serialized — they're recomputed on deserialization.
 #[derive(Debug, Clone, Serialize)]
@@ -113,7 +100,6 @@ impl<'de> Deserialize<'de> for NodeLeaf {
         #[derive(Deserialize)]
         struct Raw {
             body: NodeBody,
-            #[serde(default = "default_timestamp", deserialize_with = "deserialize_timestamp_or_epoch")]
             timestamp: DateTime<Utc>,
         }
         let raw = Raw::deserialize(deserializer)?;
@@ -133,7 +119,6 @@ pub enum AstNode {
     Branch {
         role: Role,
         children: Vec<AstNode>,
-        #[serde(default = "default_timestamp", deserialize_with = "deserialize_timestamp_or_epoch")]
         timestamp: DateTime<Utc>,
         /// Per-response memory attribution from full scoring matrix.
         /// Maps memory key → divergence score for this response.
@@ -1363,45 +1348,31 @@ mod tests {
     // -- Timestamp deserialization tests ------------------------------------------

     #[test]
-    fn test_timestamp_null_becomes_epoch() {
-        // Old conversation.jsonl entries have "timestamp":null
-        // serde(default) only handles missing fields, not explicit nulls.
-        // We need to verify our deserialize handles this correctly.
+    fn test_timestamp_null_rejected() {
+        // Missing/null timestamps used to be accepted via a lenient
+        // deserialize fallback. Post-migration the schema is strict.
let json = r#"{"Leaf":{"body":{"Content":"hello"},"timestamp":null}}"#; - let node: AstNode = serde_json::from_str(json).unwrap(); - let leaf = node.leaf().unwrap(); - assert_eq!(leaf.timestamp(), DateTime::::UNIX_EPOCH); + assert!(serde_json::from_str::(json).is_err()); } #[test] - fn test_timestamp_missing_becomes_epoch() { + fn test_timestamp_missing_rejected() { let json = r#"{"Leaf":{"body":{"Content":"hello"}}}"#; + assert!(serde_json::from_str::(json).is_err()); + } + + #[test] + fn test_branch_timestamp_missing_rejected() { + let json = r#"{"Branch":{"role":"User","children":[]}}"#; + assert!(serde_json::from_str::(json).is_err()); + } + + #[test] + fn test_timestamp_present_accepted() { + let json = r#"{"Leaf":{"body":{"Content":"hi"},"timestamp":"2026-04-16T12:00:00Z"}}"#; let node: AstNode = serde_json::from_str(json).unwrap(); let leaf = node.leaf().unwrap(); - assert_eq!(leaf.timestamp(), DateTime::::UNIX_EPOCH); - } - - #[test] - fn test_branch_timestamp_null_becomes_epoch() { - let json = r#"{"Branch":{"role":"User","children":[{"Leaf":{"body":{"Content":"hi"}}}],"timestamp":null}}"#; - let node: AstNode = serde_json::from_str(json).unwrap(); - match node { - AstNode::Branch { timestamp, .. } => { - assert_eq!(timestamp, DateTime::::UNIX_EPOCH); - } - _ => panic!("expected Branch"), - } - } - - #[test] - fn test_branch_timestamp_missing_becomes_epoch() { - let json = r#"{"Branch":{"role":"User","children":[{"Leaf":{"body":{"Content":"hi"}}}]}}"#; - let node: AstNode = serde_json::from_str(json).unwrap(); - match node { - AstNode::Branch { timestamp, .. } => { - assert_eq!(timestamp, DateTime::::UNIX_EPOCH); - } - _ => panic!("expected Branch"), - } + assert_eq!(leaf.timestamp().to_rfc3339(), + "2026-04-16T12:00:00+00:00"); } } diff --git a/src/bin/fix-timestamps.rs b/src/bin/fix-timestamps.rs new file mode 100644 index 0000000..31a8788 --- /dev/null +++ b/src/bin/fix-timestamps.rs @@ -0,0 +1,180 @@ +// fix-timestamps: One-off migration for ~/.consciousness/agent-sessions/ +// conversation.jsonl. +// +// Before Branch nodes carried their own timestamps, early entries were +// serialized with missing/null timestamp fields — they deserialize as +// UNIX_EPOCH via the (now-to-be-removed) deserialize_timestamp_or_epoch +// fallback. Training needs every entry to have a unique timestamp to +// dedup already-trained responses. +// +// Walks the file, synthesizes timestamps for any entry stuck at +// UNIX_EPOCH by linear interpolation between surrounding real +// timestamps. For child leaves inside a Branch, derives timestamps +// from the parent with a tiny per-child offset. +// +// SAFETY: reads from argv[1], writes to argv[1].tmp, renames into +// place. Keep a .bak copy before running. 
+//
+// Usage: fix-timestamps <conversation.jsonl>
+
+use std::io::{BufRead, BufReader, BufWriter, Write};
+use std::path::PathBuf;
+
+use anyhow::{Context, Result};
+use chrono::{DateTime, Duration, Utc};
+
+use consciousness::agent::context::AstNode;
+
+fn main() -> Result<()> {
+    let path: PathBuf = std::env::args().nth(1)
+        .context("usage: fix-timestamps <conversation.jsonl>")?.into();
+
+    let f = std::fs::File::open(&path)
+        .with_context(|| format!("open {}", path.display()))?;
+    let reader = BufReader::new(f);
+
+    let mut nodes: Vec<AstNode> = Vec::new();
+    for (i, line) in reader.lines().enumerate() {
+        let line = line?;
+        if line.trim().is_empty() { continue; }
+        let node: AstNode = serde_json::from_str(&line)
+            .with_context(|| format!("line {}: parse", i + 1))?;
+        nodes.push(node);
+    }
+    println!("read {} entries", nodes.len());
+
+    fix_top_level_timestamps(&mut nodes);
+    for node in &mut nodes {
+        propagate_to_children(node);
+    }
+
+    // Ensure uniqueness — real timestamps can collide when two entries
+    // were written in the same ns; synthesized ones can also overlap.
+    // Bump colliding ns by 1 until unique.
+    let mut seen = std::collections::HashSet::new();
+    let mut bumps = 0usize;
+    for (i, node) in nodes.iter_mut().enumerate() {
+        let ts = top_ts(node);
+        assert!(ts > DateTime::<Utc>::UNIX_EPOCH,
+                "entry {}: still UNIX_EPOCH", i);
+        let mut ns = ts.timestamp_nanos_opt().expect("ts in i64 ns range");
+        let mut bumped = false;
+        while !seen.insert(ns) {
+            ns += 1;
+            bumped = true;
+            bumps += 1;
+        }
+        if bumped {
+            set_top_ts(node, DateTime::<Utc>::from_timestamp_nanos(ns));
+        }
+    }
+    println!("all {} timestamps real and unique ({} ns bumps)",
+             nodes.len(), bumps);
+
+    let tmp = path.with_extension("jsonl.tmp");
+    {
+        let f = std::fs::File::create(&tmp)
+            .with_context(|| format!("create {}", tmp.display()))?;
+        let mut w = BufWriter::new(f);
+        for node in &nodes {
+            serde_json::to_writer(&mut w, node)?;
+            w.write_all(b"\n")?;
+        }
+        w.flush()?;
+    }
+    std::fs::rename(&tmp, &path)
+        .with_context(|| format!("rename {} -> {}", tmp.display(), path.display()))?;
+    println!("wrote {}", path.display());
+
+    Ok(())
+}
+
+fn top_ts(node: &AstNode) -> DateTime<Utc> {
+    match node {
+        AstNode::Leaf(leaf) => leaf.timestamp(),
+        AstNode::Branch { timestamp, .. } => *timestamp,
+    }
+}
+
+fn set_top_ts(node: &mut AstNode, ts: DateTime<Utc>) {
+    match node {
+        AstNode::Leaf(leaf) => *leaf = leaf.clone().with_timestamp(ts),
+        AstNode::Branch { timestamp, .. } => *timestamp = ts,
+    }
+}
+
+/// Fill in missing top-level timestamps. Strategy:
+/// - If two real timestamps bracket a run of missing ones, linearly
+///   interpolate between them.
+/// - If missing ones precede the first real one, back-fill using
+///   (first_real - N·1µs).
+/// - If missing ones follow the last real one, forward-fill.
+/// - If no real timestamps exist at all, synthesize from now() going
+///   backwards.
+fn fix_top_level_timestamps(nodes: &mut [AstNode]) {
+    let real: Vec<(usize, DateTime<Utc>)> = nodes.iter().enumerate()
+        .filter(|(_, n)| top_ts(n) > DateTime::<Utc>::UNIX_EPOCH)
+        .map(|(i, n)| (i, top_ts(n)))
+        .collect();
+
+    if real.is_empty() {
+        let now = Utc::now();
+        let len = nodes.len();
+        for (i, node) in nodes.iter_mut().enumerate() {
+            let ts = now - Duration::microseconds((len - i) as i64);
+            set_top_ts(node, ts);
+        }
+        return;
+    }
+
+    // Helper: bisect real[] for the nearest real entries around idx.
+    let find_bracket = |idx: usize| -> (Option<(usize, DateTime<Utc>)>,
+                                        Option<(usize, DateTime<Utc>)>) {
+        let pos = real.binary_search_by_key(&idx, |(i, _)| *i);
+        let (prior_pos, next_pos) = match pos {
+            Ok(p) => (Some(p), Some(p)),
+            Err(p) => (
+                if p == 0 { None } else { Some(p - 1) },
+                if p >= real.len() { None } else { Some(p) },
+            ),
+        };
+        (prior_pos.map(|p| real[p]), next_pos.map(|p| real[p]))
+    };
+
+    for i in 0..nodes.len() {
+        if top_ts(&nodes[i]) > DateTime::<Utc>::UNIX_EPOCH {
+            continue;
+        }
+        let (prior, next) = find_bracket(i);
+        let new_ts = match (prior, next) {
+            (Some((pi, pt)), Some((ni, nt))) if pi != ni => {
+                // Linear interpolate.
+                let span_ns = (nt - pt).num_nanoseconds().unwrap_or(0);
+                let offset_ns = span_ns * (i - pi) as i64 / (ni - pi) as i64;
+                pt + Duration::nanoseconds(offset_ns)
+            }
+            (Some((pi, pt)), _) => {
+                pt + Duration::microseconds((i - pi) as i64)
+            }
+            (None, Some((ni, nt))) => {
+                nt - Duration::microseconds((ni - i) as i64)
+            }
+            (None, None) => unreachable!(),
+        };
+        set_top_ts(&mut nodes[i], new_ts);
+    }
+}
+
+/// For every Branch, ensure each child Leaf has a timestamp. If missing,
+/// use parent.ts + child_idx·1ns so siblings stay unique but close.
+fn propagate_to_children(node: &mut AstNode) {
+    if let AstNode::Branch { timestamp, children, .. } = node {
+        let parent_ts = *timestamp;
+        for (ci, child) in children.iter_mut().enumerate() {
+            if top_ts(child) <= DateTime::<Utc>::UNIX_EPOCH {
+                set_top_ts(child, parent_ts + Duration::nanoseconds(ci as i64));
+            }
+            propagate_to_children(child);
+        }
+    }
+}
diff --git a/src/mind/log.rs b/src/mind/log.rs
index 03e349c..7ac0d79 100644
--- a/src/mind/log.rs
+++ b/src/mind/log.rs
@@ -55,17 +55,13 @@ impl ConversationLog {
     }

     pub fn oldest_timestamp(&self) -> Option<DateTime<Utc>> {
-        // Read forward from the start to find first non-epoch timestamp
        let file = File::open(&self.path).ok()?;
        let mmap = unsafe { Mmap::map(&file).ok()? };
        for line in mmap.split(|&b| b == b'\n') {
            if line.is_empty() { continue; }
            if let Ok(node) = serde_json::from_slice::<AstNode>(line) {
                if let Some(leaf) = node.leaf() {
-                    let ts = leaf.timestamp();
-                    if ts != chrono::DateTime::UNIX_EPOCH {
-                        return Some(ts);
-                    }
+                    return Some(leaf.timestamp());
                }
            }
        }
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index 9b65f2a..3c12efc 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -516,16 +516,11 @@ pub async fn score_finetune_candidates(

     let node = &entries[entry_idx];

-    // Get timestamp and skip if already trained
-    let timestamp_ns = match node_timestamp_ns(node) {
-        Some(ts) => {
-            if trained.contains(&ts) {
-                continue; // Already trained, skip
-            }
-            ts
-        }
-        None => continue, // No timestamp, skip
-    };
+    // Skip if already trained on.
+    let timestamp_ns = node_timestamp_ns(node);
+    if trained.contains(&timestamp_ns) {
+        continue;
+    }

     // Extract response text
     let response_text = match node {
@@ -661,18 +656,15 @@ pub fn mark_trained(timestamp_ns: i64) {
 }

 /// Get timestamp in nanoseconds from an AstNode.
-/// Returns None for entries with default UNIX_EPOCH timestamp (old data)
-/// or timestamps outside the representable nano range (pre-1677 or post-2262).
-pub fn node_timestamp_ns(node: &AstNode) -> Option<i64> {
+/// i64-ns representation covers 1677..2262 via chrono; timestamps
+/// outside that window would be bugs we'd want to surface, hence panic.
+pub fn node_timestamp_ns(node: &AstNode) -> i64 {
     let ts = match node {
         AstNode::Leaf(leaf) => leaf.timestamp(),
         AstNode::Branch { timestamp, .. } => *timestamp,
-    if ts == chrono::DateTime::UNIX_EPOCH {
-        None // Old entry without real timestamp
-    } else {
-        ts.timestamp_nanos_opt()
-    }
+    ts.timestamp_nanos_opt()
+        .expect("timestamp outside i64-ns representable range (1677..2262)")
 }
 
 // ── Training API ────────────────────────────────────────────────

From d5a3398cc9fb7846a9a31eee2ab74cba0410f9af Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 12:44:13 -0400
Subject: [PATCH 20/94] learn: move threshold/gen state out of title bar into a settings row
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The F6 title line was starting to read like a control panel —
`legend ───── learn [thresh: 1e-7] [gen]` — which crowded the legend
and the label, and didn't leave room for more settings as the screen
grew. Move threshold and gen status to their own line inside the
border, right above the content area.

Drop the duplicated `=gen[on]` marker from the bottom help line since
the settings row already shows gen state.

Co-Authored-By: Proof of Concept
---
 src/user/learn.rs | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/src/user/learn.rs b/src/user/learn.rs
index 522dbb8..c77230e 100644
--- a/src/user/learn.rs
+++ b/src/user/learn.rs
@@ -146,29 +146,41 @@ impl ScreenView for LearnScreen {
         }
 
         // Now render
         let gen_on = crate::subconscious::learn::alternates_enabled();
         let threshold = app.mind_state.as_ref().map(|ms| ms.learn_threshold).unwrap_or(0.0);
-        let title_right = if gen_on {
-            format!(" learn [thresh: {:e}] [gen] ", threshold)
-        } else {
-            format!(" learn [thresh: {:e}] ", threshold)
-        };
         let block = Block::default()
             .title_top(Line::from(screen_legend()).left_aligned())
-            .title_top(Line::from(title_right).right_aligned())
+            .title_top(Line::from(" learn ").right_aligned())
             .borders(Borders::ALL)
             .border_style(Style::default().fg(Color::Magenta));
         let inner = block.inner(area);
         frame.render_widget(block, area);
 
+        // Split inner: top line for settings, rest for content.
+        let [settings_area, content_area] = Layout::vertical([
+            Constraint::Length(1),
+            Constraint::Min(0),
+        ]).areas(inner);
+
+        let settings = Line::from(vec![
+            Span::raw(" thresh: "),
+            Span::styled(format!("{:e}", threshold), Style::default().fg(Color::Yellow)),
+            Span::raw("  gen: "),
+            Span::styled(
+                if gen_on { "[on]" } else { "[off]" },
+                Style::default().fg(if gen_on { Color::Green } else { Color::DarkGray }),
+            ),
+        ]);
+        frame.render_widget(Paragraph::new(settings), settings_area);
+
         let candidates = &app.finetune_candidates;
         if candidates.is_empty() {
-            render_empty(frame, inner, app);
+            render_empty(frame, content_area, app);
         } else {
             // Layout: list on left, detail on right
             let [list_area, detail_area] = Layout::horizontal([
                 Constraint::Percentage(40),
                 Constraint::Percentage(60),
-            ]).areas(inner);
+            ]).areas(content_area);
 
             // Render candidate list
             let items: Vec<ListItem> = candidates.iter().map(|c| {
@@ -205,7 +217,6 @@ impl ScreenView for LearnScreen {
         }
 
         // Render help at bottom (always, even when empty)
-        let gen_status = if gen_on { "[on]" } else { "[off]" };
         let help = Line::from(vec![
             Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
             Span::raw("=nav "),
             Span::styled("a", Style::default().fg(Color::Green)),
             Span::raw("=approve "),
             Span::styled("r", Style::default().fg(Color::Red)),
             Span::raw("=reject "),
             Span::styled("g", Style::default().fg(Color::Yellow)),
-            Span::raw(format!("=gen{} ", gen_status)),
+            Span::raw("=gen "),
             Span::styled("s", Style::default().fg(Color::Magenta)),
             Span::raw("=send "),
             Span::styled("+/-", Style::default().fg(Color::Cyan)),

From 343e43afabb353e136e8aca4585d7ab855cec7a8 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 12:44:25 -0400
Subject: [PATCH 21/94] learn: stream candidates to UI, update status during alternate gen
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

With the timestamp filter gone (previous commit),
score_finetune_candidates started returning the actual ~100+ candidates
per scoring run. The existing code generated alternates for all of them
in a tight loop before returning anything, leaving the status line
stuck on "finetune: scoring N responses..." for hundreds of seconds
while the B200 was pegged.

Two fixes:

1. score_finetune_candidates now takes an ActivityGuard and a callback.
   Candidates are emitted one-at-a-time as they complete (after their
   alternate if that's enabled, immediately otherwise). The activity
   status updates to "finetune: generating alternate N/M" during the
   alternate-gen phase so it's clear what's happening.

2. BgEvent::FinetuneCandidates(Vec<_>) → FinetuneCandidate(one). Each
   emitted candidate is pushed onto shared.finetune_candidates; the UI
   tick picks it up and renders it on the next frame.
   start_finetune_scoring clears the previous run's list at the top so
   each run is fresh.

Return type changes from (Vec<_>, f64) → (usize, f64) — the count above
threshold is all the caller still needs since the candidates stream
through the callback.

Co-Authored-By: Proof of Concept
---
 src/mind/mod.rs           | 21 +++++++++++++--------
 src/subconscious/learn.rs | 34 +++++++++++++++++++++-----------
 2 files changed, 36 insertions(+), 19 deletions(-)

diff --git a/src/mind/mod.rs b/src/mind/mod.rs
index 81bcb09..c2cb365 100644
--- a/src/mind/mod.rs
+++ b/src/mind/mod.rs
@@ -320,7 +320,7 @@ impl MindState {
 
 /// Background task completion events.
 enum BgEvent {
     ScoringDone,
-    FinetuneCandidates(Vec<learn::FinetuneCandidate>),
+    FinetuneCandidate(learn::FinetuneCandidate),
 }
 
 // --- Mind: cognitive state machine ---
@@ -656,7 +661,12 @@ impl Mind {
     /// once this runs continuously, we'll just train whatever lands at full
     /// context without filtering.
     pub fn start_finetune_scoring(&self) {
-        let threshold = self.shared.lock().unwrap().learn_threshold;
+        let threshold = {
+            let mut s = self.shared.lock().unwrap();
+            // Clear the previous run's candidates so this run's stream is fresh.
+            s.finetune_candidates.clear();
+            s.learn_threshold
+        };
 
         let agent = self.agent.clone();
         let bg_tx = self.bg_tx.clone();
@@ -678,12 +683,12 @@ impl Mind {
             activity.update(format!("finetune: scoring {} responses...",
                 responses_considered)).await;
 
+            let bg_tx_cb = bg_tx.clone();
             let stats = match learn::score_finetune_candidates(
-                &context, score_count, &client, threshold,
+                &context, score_count, &client, threshold, &activity,
+                |c| { let _ = bg_tx_cb.send(BgEvent::FinetuneCandidate(c)); },
             ).await {
-                Ok((candidates, max_div)) => {
-                    let above_threshold = candidates.len();
-                    let _ = bg_tx.send(BgEvent::FinetuneCandidates(candidates));
+                Ok((above_threshold, max_div)) => {
                     FinetuneScoringStats {
                         responses_considered,
                         above_threshold,
@@ -801,8 +806,8 @@ impl Mind {
                 BgEvent::ScoringDone => {
                     self.shared.lock().unwrap().scoring_in_flight = false;
                 }
-                BgEvent::FinetuneCandidates(candidates) => {
-                    self.shared.lock().unwrap().finetune_candidates = candidates;
+                BgEvent::FinetuneCandidate(c) => {
+                    self.shared.lock().unwrap().finetune_candidates.push(c);
                 }
             }
         }
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index 3c12efc..2424fa5 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -492,22 +492,28 @@ pub struct FinetuneCandidate {
 
 /// Score and enrich finetune candidates with full context.
 ///
-/// Returns (candidates, max_divergence) - candidates ready for review with
-/// context/continuation token IDs, and the highest divergence seen.
+/// Candidates are delivered via `on_candidate` one-at-a-time as they become
+/// ready: scoring happens once (one /score call), then for each candidate
+/// that passes the threshold we optionally generate an alternate response
+/// and then emit it. The activity status is updated during the alternate
+/// phase so the UI doesn't look stuck.
+///
+/// Returns (count_above_threshold, max_divergence).
 pub async fn score_finetune_candidates(
     context: &ContextState,
     count: usize,
     client: &ApiClient,
     min_divergence: f64,
-) -> anyhow::Result<(Vec<FinetuneCandidate>, f64)> {
+    activity: &crate::agent::ActivityGuard,
+    mut on_candidate: impl FnMut(FinetuneCandidate),
+) -> anyhow::Result<(usize, f64)> {
     let scores = score_finetune(context, count, client).await?;
     let max_divergence = scores.iter().map(|(_, d)| *d).fold(0.0f64, f64::max);
 
     let entries = context.conversation();
-    let mut candidates = Vec::new();
-
     let trained = load_trained();
+    let mut candidates: Vec<FinetuneCandidate> = Vec::new();
 
     for (entry_idx, divergence) in scores {
         if divergence < min_divergence {
@@ -522,7 +528,7 @@ pub async fn score_finetune_candidates(
             continue;
         }
 
-        // Extract response text
+        // Extract response text.
         let response_text = match node {
             AstNode::Branch { children, .. } => {
                 children.iter()
@@ -536,7 +542,7 @@ pub async fn score_finetune_candidates(
             _ => continue,
         };
 
-        // Build token IDs: context = everything before response, continuation = response
+        // Build token IDs: context = everything before response, continuation = response.
         let (context_ids, _) = build_token_ids(context, 0..entry_idx, Filter::None);
         let continuation_ids: Vec<u32> = node.token_ids().into_iter().collect();
 
@@ -551,17 +557,23 @@ pub async fn score_finetune_candidates(
         });
     }
 
-    // Generate alternates if enabled
-    if alternates_enabled() && !candidates.is_empty() {
-        for candidate in &mut candidates {
+    let total = candidates.len();
+    let gen_alternates = alternates_enabled() && total > 0;
+
+    for (i, mut candidate) in candidates.into_iter().enumerate() {
+        if gen_alternates {
+            activity.update(
+                format!("finetune: generating alternate {}/{}", i + 1, total)
+            ).await;
             match generate_alternate(context, candidate.entry_idx, client).await {
                 Ok(text) => candidate.alternate_text = Some(text),
                 Err(e) => dbglog!("[finetune] alternate generation failed: {:#}", e),
             }
         }
+        on_candidate(candidate);
     }
 
-    Ok((candidates, max_divergence))
+    Ok((total, max_divergence))
 }
 
 /// Generate what the model would say without memories for a given entry.

From 313f85f34a9eabdbc098d9d99f9f959a16c3d4db Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 12:53:22 -0400
Subject: [PATCH 22/94] config: global writable AppConfig; learn settings live there
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Runtime-mutable settings (F6's threshold knob, the generate-alternates
toggle, anything else that comes along) were ending up as mirrored
fields on MindState — each new config setting grew MindState::new's
signature and added a clone+sync path. Wrong home. MindState is
ephemeral session state, not a config projection.

Give AppConfig the same treatment the memory Config has: install it
into a global RwLock at startup via load_app, read through
config::app() (returns a read guard), mutate through update_app. The
config_writer functions now write to disk AND update the cache
atomically, so the one-stop-shop call keeps both in sync.

Also while in here:

- learn.generate_alternates moves from a sentinel file
  (~/.consciousness/cache/finetune-alternates, "exists = enabled") into
  the config under the learn section. On first run with this build, if
  the sentinel file still exists Mind::new flips the config value to
  true and removes it. Drops alternates_enabled()/set_alternates().

- Default threshold 0.0000001 → 1.0. With the timestamp filter removed
  the previous value was letting essentially everything through; 1.0 is
  a sane "nothing gets through unless you actually want it" default.

- score_finetune_candidates takes generate_alternates as a parameter
  instead of reading a global — caller snapshots the config values once
  at the top of start_finetune_scoring so the async task doesn't need
  to hold the config read lock across awaits.

- MindState.learn_threshold / learn_generate_alternates gone; the
  SetLearn* command handlers now just delegate to config_writer.

Kent noted RwLock<Arc<Config>> (the pattern used by the memory Config
global) is pointless here — nobody needs a snapshot-after-release,
reads are short — so this uses a plain RwLock and returns a read guard.
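
The snapshot-then-drop discipline callers are expected to follow, as a
sketch (this mirrors what start_finetune_scoring does below; the
some_async_work helper is invented for illustration):

    // Copy what you need out of the read guard before any .await;
    // never hold the guard across one.
    let (threshold, gen_alternates) = {
        let app = crate::config::app();   // RwLock read guard
        (app.learn.threshold, app.learn.generate_alternates)
    };                                    // guard dropped here
    tokio::spawn(some_async_work(threshold, gen_alternates));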
Co-Authored-By: Proof of Concept
---
 src/config.rs             | 46 +++++++++++++++++++++++++++++++++++++--
 src/config_writer.rs      | 12 +++++++++-
 src/mind/mod.rs           | 44 +++++++++++++++++++++++++------------
 src/subconscious/learn.rs | 26 ++--------------------
 src/user/learn.rs         | 32 +++++++++++++--------------
 5 files changed, 102 insertions(+), 58 deletions(-)

diff --git a/src/config.rs b/src/config.rs
index 3cd9b55..494aea8 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -331,13 +331,21 @@ pub struct LearnConfig {
     /// fine-tuning candidates. Lower = more sensitive.
     #[serde(default = "default_learn_threshold")]
     pub threshold: f64,
+    /// Whether to generate "what would the model have said without
+    /// memories" alternates alongside each scoring run. Expensive —
+    /// one full streaming generation per candidate.
+    #[serde(default)]
+    pub generate_alternates: bool,
 }
 
-fn default_learn_threshold() -> f64 { 0.0000001 }
+fn default_learn_threshold() -> f64 { 1.0 }
 
 impl Default for LearnConfig {
     fn default() -> Self {
-        Self { threshold: default_learn_threshold() }
+        Self {
+            threshold: default_learn_threshold(),
+            generate_alternates: false,
+        }
     }
 }
 
@@ -573,12 +581,46 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment {
 }
 
 /// Load just the AppConfig — no validation, no prompt assembly.
+/// Also installs the loaded AppConfig into the global cache so
+/// `config::app()` is available everywhere.
 pub fn load_app(cli: &crate::user::CliArgs) -> Result<(AppConfig, Figment)> {
     let figment = build_figment(cli);
     let app: AppConfig = figment.extract().context("Failed to load configuration")?;
+    install_app(app.clone());
     Ok((app, figment))
 }
 
+// ============================================================
+// Global AppConfig cache (writable, for runtime-mutable settings
+// like learn.threshold that F6 edits via config_writer).
+// ============================================================
+
+static APP_CONFIG: OnceLock<RwLock<AppConfig>> = OnceLock::new();
+
+fn install_app(app: AppConfig) {
+    let slot = APP_CONFIG.get_or_init(|| RwLock::new(app.clone()));
+    *slot.write().unwrap() = app;
+}
+
+/// Current AppConfig, held under a read lock. Reads should be brief
+/// (no holding across await / long work) to avoid starving writers.
+/// Panics if called before load_app — which runs once at startup.
+pub fn app() -> std::sync::RwLockReadGuard<'static, AppConfig> {
+    APP_CONFIG
+        .get()
+        .expect("config::app() called before load_app()")
+        .read()
+        .unwrap()
+}
+
+/// Mutate the cached AppConfig in place. Used by config_writer to keep
+/// the in-memory view in sync with disk after surgical edits to
+/// ~/.consciousness/config.json5.
+pub fn update_app(f: impl FnOnce(&mut AppConfig)) {
+    let slot = APP_CONFIG.get().expect("update_app before load_app");
+    f(&mut *slot.write().unwrap());
+}
+
 /// Load the full config: figment → AppConfig → resolve backend → assemble prompts.
 pub async fn load_session(cli: &crate::user::CliArgs) -> Result<(SessionConfig, Figment)> {
     let (app, figment) = load_app(cli)?;
diff --git a/src/config_writer.rs b/src/config_writer.rs
index 65401b9..7625295 100644
--- a/src/config_writer.rs
+++ b/src/config_writer.rs
@@ -140,7 +140,17 @@ fn parse_scalar_literal(literal: &str) -> Result<JSONValue> {
 
 /// Convenience: set `learn.threshold` to the given f64.
 pub fn set_learn_threshold(value: f64) -> Result<()> {
     // {:e} gives the minimal scientific notation that preserves the value.
- set_scalar("learn", "threshold", &format!("{:e}", value)) + set_scalar("learn", "threshold", &format!("{:e}", value))?; + crate::config::update_app(|app| app.learn.threshold = value); + Ok(()) +} + +/// Convenience: set `learn.generate_alternates` to the given bool. +pub fn set_learn_generate_alternates(value: bool) -> Result<()> { + set_scalar("learn", "generate_alternates", + if value { "true" } else { "false" })?; + crate::config::update_app(|app| app.learn.generate_alternates = value); + Ok(()) } #[cfg(test)] diff --git a/src/mind/mod.rs b/src/mind/mod.rs index c2cb365..53b76e5 100644 --- a/src/mind/mod.rs +++ b/src/mind/mod.rs @@ -151,9 +151,6 @@ pub struct MindState { pub finetune_candidates: Vec, /// Last scoring run stats for UI display. pub finetune_last_run: Option, - /// Divergence threshold for finetune scoring — mutable via F6 hotkeys - /// and persisted back to ~/.consciousness/config.json5. - pub learn_threshold: f64, } /// Stats from the last finetune scoring run. @@ -189,7 +186,6 @@ impl Clone for MindState { unc_idle_deadline: self.unc_idle_deadline, finetune_candidates: self.finetune_candidates.clone(), finetune_last_run: self.finetune_last_run.clone(), - learn_threshold: self.learn_threshold, } } } @@ -206,6 +202,8 @@ pub enum MindCommand { ScoreFinetune, /// Update the finetune divergence threshold and persist to config. SetLearnThreshold(f64), + /// Toggle alternate-response generation during scoring; persist to config. + SetLearnGenerateAlternates(bool), /// Abort current turn, kill processes Interrupt, /// Reset session @@ -215,7 +213,7 @@ pub enum MindCommand { } impl MindState { - pub fn new(max_dmn_turns: u32, learn_threshold: f64) -> Self { + pub fn new(max_dmn_turns: u32) -> Self { Self { input: Vec::new(), turn_active: false, @@ -233,7 +231,6 @@ impl MindState { unc_idle_deadline: Instant::now() + std::time::Duration::from_secs(60), finetune_candidates: Vec::new(), finetune_last_run: None, - learn_threshold, } } @@ -363,9 +360,20 @@ impl Mind { crate::agent::tools::tools(), ).await; + // Migrate legacy "file exists = enabled" sentinel for the + // generate-alternates flag into the config. One-shot; after this + // the sentinel is gone and the config is the source of truth. + let legacy_sentinel = dirs::home_dir().unwrap_or_default() + .join(".consciousness/cache/finetune-alternates"); + if legacy_sentinel.exists() { + if !crate::config::app().learn.generate_alternates { + let _ = crate::config_writer::set_learn_generate_alternates(true); + } + let _ = std::fs::remove_file(&legacy_sentinel); + } + let shared = Arc::new(std::sync::Mutex::new(MindState::new( config.app.dmn.max_turns, - config.app.learn.threshold, ))); let (turn_watch, _) = tokio::sync::watch::channel(false); let (conscious_active, _) = tokio::sync::watch::channel(false); @@ -569,11 +577,16 @@ impl Mind { self.start_finetune_scoring(); } MindCommand::SetLearnThreshold(value) => { - self.shared.lock().unwrap().learn_threshold = value; if let Err(e) = crate::config_writer::set_learn_threshold(value) { dbglog!("[learn] failed to persist threshold {}: {:#}", value, e); } } + MindCommand::SetLearnGenerateAlternates(value) => { + if let Err(e) = crate::config_writer::set_learn_generate_alternates(value) { + dbglog!("[learn] failed to persist generate_alternates {}: {:#}", + value, e); + } + } } } } @@ -656,12 +669,14 @@ impl Mind { /// once this runs continuously, we'll just train whatever lands at full /// context without filtering. 
 pub fn start_finetune_scoring(&self) {
-        let threshold = {
-            let mut s = self.shared.lock().unwrap();
-            // Clear the previous run's candidates so this run's stream is fresh.
-            s.finetune_candidates.clear();
-            s.learn_threshold
-        };
+        // Snapshot the config values we need before spawning — the scoring
+        // task shouldn't hold the config read lock across async work.
+        let (threshold, gen_alternates) = {
+            let app = crate::config::app();
+            (app.learn.threshold, app.learn.generate_alternates)
+        };
+        // Clear the previous run's candidates so this run's stream is fresh.
+        self.shared.lock().unwrap().finetune_candidates.clear();
 
         let agent = self.agent.clone();
         let bg_tx = self.bg_tx.clone();
@@ -685,7 +700,8 @@ impl Mind {
 
             let bg_tx_cb = bg_tx.clone();
             let stats = match learn::score_finetune_candidates(
-                &context, score_count, &client, threshold, &activity,
+                &context, score_count, &client, threshold,
+                gen_alternates, &activity,
                 |c| { let _ = bg_tx_cb.send(BgEvent::FinetuneCandidate(c)); },
             ).await {
                 Ok((above_threshold, max_div)) => {
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index 2424fa5..00f0834 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -504,6 +504,7 @@ pub async fn score_finetune_candidates(
     count: usize,
     client: &ApiClient,
     min_divergence: f64,
+    generate_alternates: bool,
     activity: &crate::agent::ActivityGuard,
     mut on_candidate: impl FnMut(FinetuneCandidate),
 ) -> anyhow::Result<(usize, f64)> {
@@ -558,7 +559,7 @@ pub async fn score_finetune_candidates(
     }
 
     let total = candidates.len();
-    let gen_alternates = alternates_enabled() && total > 0;
+    let gen_alternates = generate_alternates && total > 0;
 
     for (i, mut candidate) in candidates.into_iter().enumerate() {
         if gen_alternates {
@@ -616,35 +617,12 @@ async fn generate_alternate(
 use std::path::PathBuf;
 use std::collections::HashSet;
 
-const FINETUNE_ALTERNATES_FILE: &str = ".consciousness/cache/finetune-alternates";
 const TRAINED_RESPONSES_FILE: &str = ".consciousness/cache/trained-responses.json";
 
-fn alternates_path() -> PathBuf {
-    dirs::home_dir().unwrap_or_default().join(FINETUNE_ALTERNATES_FILE)
-}
-
 fn trained_path() -> PathBuf {
     dirs::home_dir().unwrap_or_default().join(TRAINED_RESPONSES_FILE)
 }
 
-/// Check if alternate response generation is enabled.
-pub fn alternates_enabled() -> bool {
-    alternates_path().exists()
-}
-
-/// Toggle alternate response generation and persist the setting.
-pub fn set_alternates(enabled: bool) {
-    let path = alternates_path();
-    if enabled {
-        if let Some(parent) = path.parent() {
-            let _ = std::fs::create_dir_all(parent);
-        }
-        let _ = std::fs::write(&path, "");
-    } else {
-        let _ = std::fs::remove_file(&path);
-    }
-}
-
 /// Load set of trained response timestamps (nanos since epoch).
 pub fn load_trained() -> HashSet<i64> {
     let path = trained_path();
diff --git a/src/user/learn.rs b/src/user/learn.rs
index c77230e..8f3d1bf 100644
--- a/src/user/learn.rs
+++ b/src/user/learn.rs
@@ -109,28 +109,24 @@ impl ScreenView for LearnScreen {
                 }
             }
             KeyCode::Char('g') => {
-                // Toggle alternate generation and persist
-                let current = crate::subconscious::learn::alternates_enabled();
-                crate::subconscious::learn::set_alternates(!current);
+                let current = crate::config::app().learn.generate_alternates;
+                let _ = self.mind_tx.send(
+                    crate::mind::MindCommand::SetLearnGenerateAlternates(!current));
             }
             KeyCode::Char('s') => {
                 app.finetune_send_approved();
             }
             KeyCode::Char('+') | KeyCode::Char('=') => {
-                // Raise threshold 10× (less sensitive — fewer candidates)
-                if let Some(ms) = &app.mind_state {
-                    let new = ms.learn_threshold * 10.0;
-                    let _ = self.mind_tx.send(
-                        crate::mind::MindCommand::SetLearnThreshold(new));
-                }
+                // Raise threshold 10× (less sensitive — fewer candidates).
+                let new = crate::config::app().learn.threshold * 10.0;
+                let _ = self.mind_tx.send(
+                    crate::mind::MindCommand::SetLearnThreshold(new));
             }
             KeyCode::Char('-') => {
-                // Lower threshold 10× (more sensitive — more candidates)
-                if let Some(ms) = &app.mind_state {
-                    let new = ms.learn_threshold / 10.0;
-                    let _ = self.mind_tx.send(
-                        crate::mind::MindCommand::SetLearnThreshold(new));
-                }
+                // Lower threshold 10× (more sensitive — more candidates).
+                let new = crate::config::app().learn.threshold / 10.0;
+                let _ = self.mind_tx.send(
+                    crate::mind::MindCommand::SetLearnThreshold(new));
             }
             _ => {}
         }
@@ -144,8 +140,10 @@ impl ScreenView for LearnScreen {
         }
 
         // Now render
-        let gen_on = crate::subconscious::learn::alternates_enabled();
-        let threshold = app.mind_state.as_ref().map(|ms| ms.learn_threshold).unwrap_or(0.0);
+        let (threshold, gen_on) = {
+            let app_cfg = crate::config::app();
+            (app_cfg.learn.threshold, app_cfg.learn.generate_alternates)
+        };
         let block = Block::default()
             .title_top(Line::from(screen_legend()).left_aligned())
             .title_top(Line::from(" learn ").right_aligned())

From 7ef02c97d1db08501a0057ed5e610901f448d819 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 13:08:19 -0400
Subject: [PATCH 23/94] config_writer: emit pretty multi-line sections, drop json5 crate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously when append_kvp created a new section or added a key, it
stuffed the "\n    " separator into the new kvp's wsc.0 (the whitespace
between its own key and colon) instead of the prior kvp's wsc.3 (the
whitespace after the prior trailing comma). Result looked like:

    lsp_servers: [...],
    learn
    : {generate_alternates
    : true,},}

The writer also didn't set any interior whitespace on the new section's
JSONObjectContext, so everything crammed onto one line — `{key: val,}`
compact, not `{\n  key: val,\n}` multi-line.

Rewrote the appender as append_kvp_pretty(object, key, value,
inner_indent, outer_indent):

- separator between kvps goes in the prior kvp's wsc.3, or if we're
  the first kvp in a fresh object, in the object's own wsc.0 (after
  its opening `{`)
- new kvp's wsc.3 carries `,\n` so the parent's closing `}` lands
  correctly indented
- interior indent vs outer indent are both explicit, so we don't have
  to rewrite this logic every time we add another nesting level

New tests: new_section_exact_multiline_layout asserts byte-exact output
shape; new_section_and_key_format_cleanly verifies no key wraps to the
next line.
Prior tests just substring-matched and happily passed on the broken
output — that's why this shipped in the first place.

Also: dropped the json5 crate dependency. json-five's serde feature
(default) provides the same from_str / to_string API. One fewer
dependency, and the two were doing the same job.

Co-Authored-By: Proof of Concept
---
 Cargo.lock           |   1 -
 Cargo.toml           |   1 -
 src/config.rs        |   4 +-
 src/config_writer.rs | 243 +++++++++++++++++++++++++++++++------------
 4 files changed, 177 insertions(+), 72 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index b474289..cd4b79f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -493,7 +493,6 @@ dependencies = [
 "hyper",
 "hyper-util",
 "json-five",
-"json5",
 "libc",
 "log",
 "memchr",
diff --git a/Cargo.toml b/Cargo.toml
index a722ad2..ea42bfa 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,7 +29,6 @@ log = "0.4"
 
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
-json5 = "1.3"
 json-five = "0.3"
 
 ratatui = { version = "0.30", features = ["unstable-rendered-line-info"] }
diff --git a/src/config.rs b/src/config.rs
index 494aea8..291e742 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -175,7 +175,7 @@ impl Config {
     /// API settings resolved from models + backend configuration.
     fn try_load_shared() -> Option<Self> {
         let content = std::fs::read_to_string(config_path()).ok()?;
-        let root: serde_json::Value = json5::from_str(&content).ok()?;
+        let root: serde_json::Value = json_five::from_str(&content).ok()?;
 
         let mem_value = root.get("memory")?;
         let mut config: Config = serde_json::from_value(mem_value.clone()).ok()?;
@@ -545,7 +545,7 @@ impl Provider for Json5File {
     fn data(&self) -> figment::Result<Map<Profile, Dict>> {
         match std::fs::read_to_string(&self.0) {
             Ok(content) => {
-                let value: figment::value::Value = json5::from_str(&content)
+                let value: figment::value::Value = json_five::from_str(&content)
                     .map_err(|e| figment::Error::from(format!("{}: {}", self.0.display(), e)))?;
                 Serialized::defaults(value).data()
             }
diff --git a/src/config_writer.rs b/src/config_writer.rs
index 7625295..079449f 100644
--- a/src/config_writer.rs
+++ b/src/config_writer.rs
@@ -52,46 +52,94 @@ fn key_matches(key: &JSONValue, name: &str) -> bool {
 
 /// Find (or create) a child object under `parent`, returning a mutable borrow
 /// of its key_value_pairs vector.
-fn get_or_create_object<'a>(
-    parent: &'a mut JSONValue,
+/// Append a new kvp to `object`, setting whitespace so the output is
+/// multi-line with the given indentation:
+///
+/// ```text
+/// {
+///     first_key: first_val,
+/// }
+/// ```
+///
+/// If `object` already has kvps, the separator between the last one and
+/// ours goes in the prior kvp's wsc.3. If we're the first kvp, the
+/// lead-in after `{` goes in the object's own wsc.0.
+fn append_kvp_pretty(
+    object: &mut JSONValue,
+    key: JSONValue,
+    value: JSONValue,
+    inner_indent: &str,
+    outer_indent: &str,
+) -> Result<()> {
+    let (pairs, ctx) = match object {
+        JSONValue::JSONObject { key_value_pairs, context } => {
+            let ctx = context.get_or_insert_with(|| JSONObjectContext {
+                wsc: (String::new(),),
+            });
+            (key_value_pairs, ctx)
+        }
+        _ => return Err(anyhow!("not an object")),
+    };
+
+    if pairs.is_empty() {
+        ctx.wsc.0 = format!("\n{}", inner_indent);
+    } else {
+        let prev = pairs.last_mut().unwrap();
+        let prev_ctx = prev.context.get_or_insert_with(|| KeyValuePairContext {
+            wsc: (String::new(), String::from(" "), String::new(), None),
+        });
+        prev_ctx.wsc.3 = Some(format!("\n{}", inner_indent));
+    }
+
+    pairs.push(JSONKeyValuePair {
+        key,
+        value,
+        context: Some(KeyValuePairContext {
+            wsc: (
+                String::new(),
+                String::from(" "),
+                String::new(),
+                Some(format!("\n{}", outer_indent)),
+            ),
+        }),
+    });
+
+    Ok(())
+}
+
+/// Find or create a child object under `parent`. Returns the index of
+/// the kvp in parent's key_value_pairs so the caller can re-borrow
+/// afterward.
+fn get_or_create_object_idx(
+    parent: &mut JSONValue,
     section: &str,
-) -> Result<&'a mut Vec<JSONKeyValuePair>> {
-    let pairs = match parent {
-        JSONValue::JSONObject { key_value_pairs, .. } => key_value_pairs,
+    inner_indent: &str,
+    outer_indent: &str,
+) -> Result<usize> {
+    let existing = match parent {
+        JSONValue::JSONObject { key_value_pairs, .. } => {
+            key_value_pairs.iter()
+                .position(|kvp| key_matches(&kvp.key, section))
+        }
         _ => return Err(anyhow!("config root is not an object")),
     };
 
-    // Separate the lookup from the mutable borrow we return — needed to
-    // satisfy the borrow checker when we create a new entry.
-    let idx = pairs.iter().position(|kvp| key_matches(&kvp.key, section));
+    if let Some(i) = existing {
+        return Ok(i);
+    }
 
-    let idx = match idx {
-        Some(i) => i,
-        None => {
-            pairs.push(JSONKeyValuePair {
-                key: JSONValue::Identifier(section.to_string()),
-                value: JSONValue::JSONObject {
-                    key_value_pairs: Vec::new(),
-                    context: Some(JSONObjectContext {
-                        wsc: (String::new(),),
-                    }),
-                },
-                context: Some(KeyValuePairContext {
-                    wsc: (
-                        String::from("\n\n  "),  // whitespace before ':'
-                        String::from(" "),       // whitespace after ':'
-                        String::new(),           // whitespace after value
-                        Some(String::new()),     // whitespace after trailing comma
-                    ),
-                }),
-            });
-            pairs.len() - 1
-        }
-    };
+    append_kvp_pretty(
+        parent,
+        JSONValue::Identifier(section.to_string()),
+        JSONValue::JSONObject {
+            key_value_pairs: Vec::new(),
+            context: Some(JSONObjectContext { wsc: (String::new(),) }),
+        },
+        inner_indent,
+        outer_indent,
+    )?;
 
-    match &mut pairs[idx].value {
-        JSONValue::JSONObject { key_value_pairs, .. } => Ok(key_value_pairs),
-        _ => Err(anyhow!("config key '{}' is not an object", section)),
+    match parent {
+        JSONValue::JSONObject { key_value_pairs, .. } => Ok(key_value_pairs.len() - 1),
+        _ => unreachable!(),
     }
 }
 
@@ -100,26 +148,36 @@ pub fn set_scalar(section: &str, key: &str, literal: &str) -> Result<()> {
     let value = parse_scalar_literal(literal)?;
     edit_config(|root| {
-        let pairs = get_or_create_object(root, section)?;
+        // New top-level sections sit at column 4 (inside root `{`),
+        // and the root's closing `}` sits at column 0.
+        let section_idx = get_or_create_object_idx(root, section, "    ", "")?;
 
-        if let Some(kvp) = pairs.iter_mut().find(|k| key_matches(&k.key, key)) {
-            kvp.value = value;
-            return Ok(());
+        let section_value = match root {
+            JSONValue::JSONObject { key_value_pairs, .. } => {
+                &mut key_value_pairs[section_idx].value
+            }
+            _ => unreachable!(),
+        };
+
+        // Update in place if the key already exists.
+        if let JSONValue::JSONObject { key_value_pairs, .. } = section_value {
+            if let Some(kvp) = key_value_pairs.iter_mut()
+                .find(|k| key_matches(&k.key, key))
+            {
+                kvp.value = value;
+                return Ok(());
+            }
         }
 
-        pairs.push(JSONKeyValuePair {
-            key: JSONValue::Identifier(key.to_string()),
+        // Append a new kvp. Inner keys sit at column 8, the section's
+        // closing `}` sits at column 4.
+        append_kvp_pretty(
+            section_value,
+            JSONValue::Identifier(key.to_string()),
             value,
-            context: Some(KeyValuePairContext {
-                wsc: (
-                    String::from("\n    "),
-                    String::from(" "),
-                    String::new(),
-                    Some(String::new()),
-                ),
-            }),
-        });
-        Ok(())
+            "        ",
+            "    ",
+        )
     })
 }
 
@@ -166,24 +224,28 @@ mod tests {
         literal: &str,
     ) -> Result<()> {
         let value = parse_scalar_literal(literal)?;
-        let pairs = get_or_create_object(root, section)?;
-        if let Some(kvp) = pairs.iter_mut().find(|k| key_matches(&k.key, key)) {
-            kvp.value = value;
-            return Ok(());
+        let section_idx = get_or_create_object_idx(root, section, "    ", "")?;
+        let section_value = match root {
+            JSONValue::JSONObject { key_value_pairs, .. } => {
+                &mut key_value_pairs[section_idx].value
+            }
+            _ => unreachable!(),
+        };
+        if let JSONValue::JSONObject { key_value_pairs, .. } = section_value {
+            if let Some(kvp) = key_value_pairs.iter_mut()
+                .find(|k| key_matches(&k.key, key))
+            {
+                kvp.value = value;
+                return Ok(());
+            }
         }
-        pairs.push(JSONKeyValuePair {
-            key: JSONValue::Identifier(key.to_string()),
+        append_kvp_pretty(
+            section_value,
+            JSONValue::Identifier(key.to_string()),
             value,
-            context: Some(KeyValuePairContext {
-                wsc: (
-                    String::from("\n    "),
-                    String::from(" "),
-                    String::new(),
-                    Some(String::new()),
-                ),
-            }),
-        });
-        Ok(())
+            "        ",
+            "    ",
+        )
     }
 
     fn edit_str<F: FnOnce(&mut JSONValue) -> Result<()>>(src: &str, f: F) -> Result<String> {
- let reparsed: serde_json::Value = json5::from_str(&out) + let reparsed: serde_json::Value = json_five::from_str(&out) .expect("mutated output must be valid JSON5"); let threshold = reparsed.pointer("/learn/threshold").expect("learn.threshold exists"); assert_eq!(threshold.as_f64(), Some(1e-7)); @@ -324,10 +386,55 @@ mod tests { assert!(!out.contains("0.001")); assert!(out.contains("// The divergence threshold")); - let reparsed: serde_json::Value = json5::from_str(&out).unwrap(); + let reparsed: serde_json::Value = json_five::from_str(&out).unwrap(); assert_eq!(reparsed.pointer("/learn/threshold").and_then(|v| v.as_f64()), Some(5e-8)); } + #[test] + fn new_section_exact_multiline_layout() { + let src = "{\n a: 1,\n}"; + let out = edit_str(src, |root| { + set_scalar_inline(root, "learn", "generate_alternates", "true")?; + set_scalar_inline(root, "learn", "threshold", "1e-7") + }).unwrap(); + + let expected = "\ +{ + a: 1, + learn: { + generate_alternates: true, + threshold: 1e-7, + }, +}"; + assert_eq!(out, expected, "\n--- got ---\n{}\n--- want ---\n{}\n", out, expected); + } + + #[test] + fn new_section_and_key_format_cleanly() { + // The kind of config we actually have in ~/.consciousness + // (top-level sections separated by blank lines, 4-space indent + // for keys within each section). Appending a fresh `learn` + // section with one key should land cleanly, not as + // `learn\n\n :{key\n :value}`. + let src = "{\n memory: {\n user_name: \"Kent\",\n },\n}"; + let out = edit_str(src, |root| { + set_scalar_inline(root, "learn", "generate_alternates", "true") + }).unwrap(); + + // No stray key-to-colon-on-next-line anywhere. + assert!(!out.contains("learn\n"), "learn key wraps: {}", out); + assert!(!out.contains("generate_alternates\n"), + "inner key wraps: {}", out); + + // The output should reparse. + let v: serde_json::Value = json_five::from_str(&out).unwrap(); + assert_eq!( + v.pointer("/learn/generate_alternates").and_then(|x| x.as_bool()), + Some(true), + "output: {}", out, + ); + } + #[test] fn roundtrip_stable_without_change() { let src = r#"{ From 2eddf3b4cffa90afe5fc523112f094cdb50d1a7a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 13:20:03 -0400 Subject: [PATCH 24/94] learn: skip empty responses; show prior conversation context on F6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two fixes to the F6 candidate display: 1. Turns where the assistant produced nothing human-visible (an interrupted generation, a turn consisting of only a tool call the renderer folds to the tool name) were landing as candidates with an empty response_text. They'd render as blank cards and, worse, we'd still burn a full alternate generation on each one. Filter them out before they reach the candidate list. 2. The detail pane showed only the scored response + alternate, with no hint of what the user had actually asked. Pre-compute the last two user/assistant exchanges on each candidate as a rendered prior_context string ([user]/[assistant] markers) and show them above the response, under a new "context & response" section heading. render_branch_text and render_prior_context extracted as helpers — the response-text rendering and prior-context rendering share the same "flatten Branch children to text" pass. 
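
For illustration, a detail pane now reads roughly like this (the
conversation content here is invented; the section layout comes from
render_detail below):

    [user]
    did the overnight scoring run finish?

    [assistant]
    It did; twelve candidates cleared the threshold.

    ─── response ───

    (the scored response text)

    ─── without memories ───

    (the alternate, when one was generated)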
Co-Authored-By: Proof of Concept
---
 src/subconscious/learn.rs | 70 +++++++++++++++++++++++++++++++++------
 src/user/learn.rs         | 22 ++++++++----
 2 files changed, 76 insertions(+), 16 deletions(-)

diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index 00f0834..7137211 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -474,12 +474,59 @@ pub async fn score_finetune(
     Ok(results)
 }
 
+/// Concatenate the text of a Branch's Leaf children — what the model
+/// actually produced on that turn (Content + Thinking + ToolCall name).
+fn render_branch_text(children: &[AstNode]) -> String {
+    children.iter()
+        .filter_map(|c| match c {
+            AstNode::Leaf(leaf) => Some(leaf.body().text().to_string()),
+            _ => None,
+        })
+        .collect::<Vec<_>>()
+        .join("")
+}
+
+/// Render the last `max_msgs` user/assistant branches before `idx` as a
+/// review-friendly string with `[user]` / `[assistant]` markers.
+fn render_prior_context(entries: &[AstNode], idx: usize, max_msgs: usize) -> String {
+    use crate::agent::context::Role;
+    let mut picked: Vec<&AstNode> = Vec::with_capacity(max_msgs);
+    for i in (0..idx).rev() {
+        if picked.len() >= max_msgs { break; }
+        if let AstNode::Branch { role, .. } = &entries[i] {
+            if matches!(role, Role::User | Role::Assistant) {
+                picked.push(&entries[i]);
+            }
+        }
+    }
+    picked.reverse();
+
+    let mut out = String::new();
+    for node in picked {
+        if let AstNode::Branch { role, children, .. } = node {
+            let marker = match role {
+                Role::User => "[user]",
+                Role::Assistant => "[assistant]",
+                _ => continue,
+            };
+            out.push_str(marker);
+            out.push('\n');
+            out.push_str(render_branch_text(children).trim());
+            out.push_str("\n\n");
+        }
+    }
+    out.trim_end().to_string()
+}
+
 /// Enriched finetune candidate with context for review.
 #[derive(Clone, Debug)]
 pub struct FinetuneCandidate {
     pub entry_idx: usize,
     pub divergence: f64,
     pub response_text: String,
+    /// Last couple of user/assistant messages before this response,
+    /// already rendered with role markers, for F6 display context.
+    pub prior_context: String,
     /// Token IDs for context (everything before the response).
     pub context_ids: Vec<u32>,
@@ -529,20 +576,22 @@ pub async fn score_finetune_candidates(
             continue;
         }
 
-        // Extract response text.
+        // Extract response text — content of the assistant turn.
        let response_text = match node {
-            AstNode::Branch { children, .. } => {
-                children.iter()
-                    .filter_map(|c| match c {
-                        AstNode::Leaf(leaf) => Some(leaf.body().text().to_string()),
-                        _ => None,
-                    })
-                    .collect::<Vec<_>>()
-                    .join("")
-            }
+            AstNode::Branch { children, .. } => render_branch_text(children),
             _ => continue,
        };
 
+        // Skip turns that produced nothing human-visible (e.g., a
+        // tool-only turn, or an interrupted generation). They'd show
+        // up as blank cards and we'd still burn alternate-gen on them.
+        if response_text.trim().is_empty() {
+            continue;
+        }
+
+        // Build the last couple of user/assistant exchanges for review.
+        let prior_context = render_prior_context(entries, entry_idx, 2);
+
         // Build token IDs: context = everything before response, continuation = response.
         let (context_ids, _) = build_token_ids(context, 0..entry_idx, Filter::None);
         let continuation_ids: Vec<u32> = node.token_ids().into_iter().collect();
 
@@ -551,6 +600,7 @@ pub async fn score_finetune_candidates(
             entry_idx,
             divergence,
             response_text,
+            prior_context,
             context_ids,
             continuation_ids,
             alternate_text: None,
diff --git a/src/user/learn.rs b/src/user/learn.rs
index 8f3d1bf..0bd351f 100644
--- a/src/user/learn.rs
+++ b/src/user/learn.rs
@@ -23,6 +23,8 @@ pub struct FinetuneCandidate {
     pub divergence: f64,
     /// The assistant response text.
     pub response_text: String,
+    /// Prior user/assistant messages for review context.
+    pub prior_context: String,
     /// Status: pending, approved, rejected, sent.
     pub status: CandidateStatus,
     /// Token IDs for context.
     pub context_ids: Vec<u32>,
@@ -49,6 +51,7 @@ impl From<crate::subconscious::learn::FinetuneCandidate> for FinetuneCandidate {
             entry_idx: c.entry_idx,
             divergence: c.divergence,
             response_text: c.response_text,
+            prior_context: c.prior_context,
             status: CandidateStatus::Pending,
             context_ids: c.context_ids,
             continuation_ids: c.continuation_ids,
@@ -305,15 +308,22 @@ fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) {
     ]);
     frame.render_widget(header, header_area);
 
-    // Content: response and alternate (if available)
+    // Content: prior context, the scored response, and alternate
+    // (if available).
     let content_block = Block::default()
         .borders(Borders::TOP)
-        .title(" response ");
+        .title(" context & response ");
 
-    let text = match &c.alternate_text {
-        Some(alt) => format!(" {}\n\n─── without memories ───\n\n {}", c.response_text, alt),
-        None => format!(" {}", c.response_text),
-    };
+    let mut text = String::new();
+    if !c.prior_context.is_empty() {
+        text.push_str(&c.prior_context);
+        text.push_str("\n\n─── response ───\n\n");
+    }
+    text.push_str(&c.response_text);
+    if let Some(alt) = &c.alternate_text {
+        text.push_str("\n\n─── without memories ───\n\n");
+        text.push_str(alt);
+    }
 
     let content = Paragraph::new(text)
         .block(content_block)

From 0e6b5dc8be5d869248067615a759ba27d7747b05 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 15:41:28 -0400
Subject: [PATCH 25/94] agent: phase-aware bail script for surface-observe concurrency
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

bail-no-competing.sh used to bail if any other live agent existed in
the state dir, period. That was too coarse: surface-observe agents run
a multi-step pipeline (surface → organize-search → organize-new →
observe), and the intent is to let a new surface-phase agent start
while an older one finishes its post-surface tail. With the old check
the newer agent always bailed, so surface-observe was effectively
serialized at the slowest cycle time.

Make the script phase-aware:

- oneshot.rs now passes the current phase as argv[2] alongside the pid
  file name. The script writes that phase into its own pid file on
  every step transition, so concurrent agents can read each other's
  phase just by cat'ing the pid files.

- Bail only when another live agent is in the same phase-group as us.
  Groups: "surface" vs. "everything else" (post-surface). At most one
  agent per group alive at a time — surface runs at a higher cadence
  than the organize/observe tail.

- Still clean up stale pid files for dead processes.
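
As a sketch (pids and phases invented), a state dir mid-run might hold:

    pid-41230   contents "surface"        dead  → removed on next check
    pid-40915   contents "observe"        alive
    pid-39812   contents "organize-new"   dead  → removed on next check

A new agent entering its surface phase sees only the live observe
agent, which is in the other group, and proceeds; a second agent trying
to run its post-surface tail would bail.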
Co-Authored-By: Proof of Concept --- src/agent/oneshot.rs | 7 ++- src/subconscious/agents/bail-no-competing.sh | 46 ++++++++++++++++---- 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/src/agent/oneshot.rs b/src/agent/oneshot.rs index 2fce906..0f04e4d 100644 --- a/src/agent/oneshot.rs +++ b/src/agent/oneshot.rs @@ -497,15 +497,20 @@ pub async fn run_one_agent( .map(|s| s.phase.clone()).collect(); // Bail check: if the agent defines a bail script, run it between steps. + // The script also refreshes our pid-file with the current phase — that's + // how concurrent agents know which phase each of us is in. let bail_script = def.bail.as_ref().map(|name| defs::agents_dir().join(name)); let state_dir_for_bail = state_dir.clone(); - // Find our own pid file so we can pass it to the bail script let our_pid = std::process::id(); let our_pid_file = format!("pid-{}", our_pid); + let step_phases_for_bail = step_phases.clone(); let bail_fn = move |step_idx: usize| -> Result<(), String> { if let Some(ref script) = bail_script { + let phase = step_phases_for_bail.get(step_idx) + .map(String::as_str).unwrap_or(""); let status = std::process::Command::new(script) .arg(&our_pid_file) + .arg(phase) .current_dir(&state_dir_for_bail) .status() .map_err(|e| format!("bail script {:?} failed: {}", script, e))?; diff --git a/src/subconscious/agents/bail-no-competing.sh b/src/subconscious/agents/bail-no-competing.sh index 43c3096..95b8219 100755 --- a/src/subconscious/agents/bail-no-competing.sh +++ b/src/subconscious/agents/bail-no-competing.sh @@ -1,21 +1,49 @@ #!/bin/bash -# Bail if other agents are alive in the state dir. -# $1 = this agent's pid file name (e.g. pid-12345) -# cwd = state dir +# Bail if another agent is in the same phase-group as us. # -# Exit 0 = continue, exit 1 = bail +# $1 = our pid file name (e.g. "pid-12345") +# $2 = the phase we're about to enter (e.g. "surface", "observe") +# cwd = state dir +# +# Also refreshes our own pid file with the current phase on each call, +# so concurrent agents can read each other's phase by cat'ing the pid +# files in the state dir. +# +# Phase groups: "surface" vs everything else ("post-surface"). We allow +# at most one agent per group to be alive at a time — so surface can run +# at a higher frequency than the slower organize/observe tail. +# +# Exit 0 = continue, exit 1 = bail (another agent in our group is alive). shopt -s nullglob my_pid_file="$1" +my_phase="$2" + +# Refresh our own pid file with the current phase. +printf '%s' "$my_phase" > "$my_pid_file" + +group_of() { + if [[ "$1" == "surface" ]]; then + echo "surface" + else + echo "post-surface" + fi +} + +my_group=$(group_of "$my_phase") for f in pid-*; do - [[ $f == $my_pid_file ]] && continue + [[ "$f" == "$my_pid_file" ]] && continue pid="${f#pid-}" - if kill -0 "$pid" 2>/dev/null; then - exit 1 # competing agent is alive - else - rm -f "$f" # stale pid file, clean up + if ! 
kill -0 "$pid" 2>/dev/null; then + rm -f "$f" # stale pid file, clean up + continue + fi + other_phase=$(cat "$f" 2>/dev/null) + other_group=$(group_of "$other_phase") + if [[ "$my_group" == "$other_group" ]]; then + exit 1 fi done From 2989a6afaaa7e39ef71f6a3216c6e0a2199ab1f1 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 15:41:55 -0400 Subject: [PATCH 26/94] config: drop dead code and collapse to a single backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. 
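
For reference, a minimal config in the new shape looks roughly like
this (values are placeholders lifted from the old defaults, not a
recommendation):

    {
        backend: {
            api_key: "...",
            base_url: "https://api.deepinfra.com/v1/openai",
        },
        models: {
            qwen: {
                model_id: "qwen/qwen3.5-397b-a17b",
                context_window: 128000,
            },
        },
        default_model: "qwen",
    }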
Co-Authored-By: Proof of Concept
---
 src/agent/mod.rs        |   4 -
 src/agent/oneshot.rs    |   2 +-
 src/config.rs           | 192 +++++++---------------------------------
 src/mind/mod.rs         |   1 -
 src/mind/unconscious.rs |   2 +-
 src/user/chat.rs        |   8 +-
 src/user/context.rs     |  11 +--
 src/user/mod.rs         |  22 +----
 8 files changed, 37 insertions(+), 205 deletions(-)

diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index db1bf39..5368db6 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -139,7 +139,6 @@ impl DispatchState {
 pub struct Agent {
     pub client: ApiClient,
     pub app_config: crate::config::AppConfig,
-    pub prompt_file: String,
     pub session_id: String,
     pub context: crate::Mutex<ContextState>,
     pub state: crate::Mutex<AgentState>,
@@ -189,7 +188,6 @@ impl Agent {
         client: ApiClient,
         personality: Vec<(String, String)>,
         app_config: crate::config::AppConfig,
-        prompt_file: String,
         conversation_log: Option<ConversationLog>,
         active_tools: tools::ActiveTools,
         agent_tools: Vec<tools::Tool>,
@@ -220,7 +218,6 @@ impl Agent {
         let agent = Arc::new(Self {
             client,
             app_config,
-            prompt_file,
             session_id,
             context: crate::Mutex::new(context),
             state: crate::Mutex::new(AgentState {
@@ -259,7 +256,6 @@ impl Agent {
         Arc::new(Self {
             client: self.client.clone(),
             app_config: self.app_config.clone(),
-            prompt_file: self.prompt_file.clone(),
             session_id: self.session_id.clone(),
             context: crate::Mutex::new(ctx),
             state: crate::Mutex::new(AgentState {
diff --git a/src/agent/oneshot.rs b/src/agent/oneshot.rs
index 0f04e4d..588a786 100644
--- a/src/agent/oneshot.rs
+++ b/src/agent/oneshot.rs
@@ -265,7 +265,7 @@ impl AutoAgent {
         let agent = Agent::new(
             client,
             personality,
-            app, String::new(),
+            app,
             None,
             super::tools::ActiveTools::new(),
             super::tools::tools(),
diff --git a/src/config.rs b/src/config.rs
index 291e742..1d5c2c3 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -3,9 +3,6 @@
 // Single config file: ~/.consciousness/config.json5
 //   Memory settings in the "memory" section (Config)
 //   Agent/backend settings at top level (AppConfig)
-//
-// Legacy fallback: ~/.consciousness/config.jsonl
-// Env override: POC_MEMORY_CONFIG
 
 use std::collections::HashMap;
 use std::path::PathBuf;
@@ -31,7 +28,6 @@ static CONFIG: OnceLock<RwLock<Arc<Config>>> = OnceLock::new();
 
 fn default_context_window() -> usize { 128_000 }
 fn default_stream_timeout() -> u64 { 60 }
-fn default_scoring_chunk_tokens() -> usize { 50_000 }
 fn default_scoring_interval_secs() -> u64 { 3600 } // 1 hour
 fn default_scoring_response_window() -> usize { 100 }
 fn default_node_weight() -> f64 { 0.7 }
@@ -83,9 +79,6 @@ pub struct Config {
     /// Stream chunk timeout in seconds (no data = timeout).
     #[serde(default = "default_stream_timeout")]
     pub api_stream_timeout_secs: u64,
-    /// Max tokens per chunk for memory scoring logprobs calls.
-    #[serde(default = "default_scoring_chunk_tokens")]
-    pub scoring_chunk_tokens: usize,
     /// How often to re-score memory nodes (seconds). Default: 3600 (1 hour).
     #[serde(default = "default_scoring_interval_secs")]
     pub scoring_interval_secs: u64,
@@ -98,15 +91,9 @@ pub struct Config {
     pub mcp_servers: Vec<McpServerConfig>,
     #[serde(default)]
     pub lsp_servers: Vec<LspServerConfig>,
-    /// Surface agent timeout in seconds.
-    #[serde(default)]
-    pub surface_timeout_secs: Option<u64>,
     /// Max conversation bytes to include in surface agent context.
     #[serde(default)]
     pub surface_conversation_bytes: Option<usize>,
-    /// Hook events that trigger the surface agent.
-    #[serde(default)]
-    pub surface_hooks: Vec<String>,
 
     // Spreading activation parameters
     #[serde(default = "default_node_weight")]
@@ -141,7 +128,6 @@ impl Default for Config {
             api_model: None,
             api_context_window: default_context_window(),
             api_stream_timeout_secs: default_stream_timeout(),
-            scoring_chunk_tokens: default_scoring_chunk_tokens(),
             scoring_interval_secs: default_scoring_interval_secs(),
             scoring_response_window: default_scoring_response_window(),
             agent_model: None,
@@ -150,9 +136,7 @@ impl Default for Config {
                 "linker".into(), "organize".into(), "distill".into(),
                 "separator".into(), "split".into(),
             ],
-            surface_timeout_secs: None,
             surface_conversation_bytes: None,
-            surface_hooks: vec![],
             mcp_servers: vec![],
             lsp_servers: vec![],
             default_node_weight: default_node_weight(),
@@ -165,10 +149,7 @@ impl Config {
     fn load_from_file() -> Self {
-        if let Some(config) = Self::try_load_shared() {
-            return config;
-        }
-        Self::load_legacy_jsonl()
+        Self::try_load_shared().unwrap_or_default()
     }
 
     /// Load from shared config. Memory settings in the "memory" section;
@@ -209,11 +190,6 @@ impl Config {
 
         Some(config)
     }
-
-    /// Load from legacy JSONL config — deprecated, just return defaults.
-    fn load_legacy_jsonl() -> Self {
-        Config::default()
-    }
 }
 
 /// Get the global memory config (cheap Arc clone).
@@ -243,19 +219,14 @@ pub fn reload() -> bool {
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct AppConfig {
-    pub backend: String,
-    pub anthropic: BackendConfig,
-    pub openrouter: BackendConfig,
+    /// Credentials for the single model backend.
     #[serde(default)]
-    pub deepinfra: BackendConfig,
-    pub prompts: PromptConfig,
+    pub backend: BackendConfig,
     pub debug: bool,
     pub compaction: CompactionConfig,
     pub dmn: DmnConfig,
     #[serde(default)]
     pub learn: LearnConfig,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub memory_project: Option<String>,
     #[serde(default)]
     pub models: HashMap<String, ModelConfig>,
     #[serde(default = "default_model_name")]
@@ -288,32 +259,10 @@ pub struct LspServerConfig {
 pub struct BackendConfig {
     #[serde(default)]
     pub api_key: String,
-    #[serde(default)]
-    pub model: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub base_url: Option<String>,
 }
 
-impl BackendConfig {
-    fn resolve(&self, default_base: &str) -> Result<(String, String, String)> {
-        if self.api_key.is_empty() {
-            anyhow::bail!(
-                "No API key. Set it in {} or use --api-key",
-                config_path().display()
-            );
-        }
-        let base = self.base_url.clone()
-            .unwrap_or_else(|| default_base.to_string());
-        Ok((base, self.api_key.clone(), self.model.clone()))
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct PromptConfig {
-    pub anthropic: String,
-    pub other: String,
-}
-
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct CompactionConfig {
     pub hard_threshold_pct: u32,
@@ -351,13 +300,8 @@ impl Default for LearnConfig {
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ModelConfig {
-    /// Backend name ("anthropic" or "openrouter")
-    pub backend: String,
-    /// Model identifier sent to the API
+    /// Model identifier sent to the API.
     pub model_id: String,
-    /// Instruction file ("CLAUDE.md" or "POC.md").
-    #[serde(default)]
-    pub prompt_file: Option<String>,
     /// Context window size in tokens.
 #[serde(default)]
     pub context_window: Option<usize>,
 }
 
 impl Default for AppConfig {
     fn default() -> Self {
         Self {
-            backend: "openrouter".to_string(),
-            anthropic: BackendConfig {
-                api_key: String::new(),
-                model: "claude-opus-4-6-20250918".to_string(),
-                base_url: None,
-            },
-            openrouter: BackendConfig {
-                api_key: String::new(),
-                model: "qwen/qwen3.5-397b-a17b".to_string(),
-                base_url: Some("https://openrouter.ai/api/v1".to_string()),
-            },
-            deepinfra: BackendConfig {
-                api_key: String::new(),
-                model: String::new(),
-                base_url: Some("https://api.deepinfra.com/v1/openai".to_string()),
-            },
-            prompts: PromptConfig {
-                anthropic: "CLAUDE.md".to_string(),
-                other: "POC.md".to_string(),
-            },
+            backend: BackendConfig::default(),
             debug: false,
             compaction: CompactionConfig {
                 hard_threshold_pct: 90,
@@ -393,7 +318,6 @@ impl Default for AppConfig {
             dmn: DmnConfig { max_turns: 20 },
             learn: LearnConfig::default(),
-            memory_project: None,
             models: HashMap::new(),
             default_model: String::new(),
             mcp_servers: Vec::new(),
@@ -409,7 +333,6 @@ pub struct SessionConfig {
     pub api_base: String,
     pub api_key: String,
     pub model: String,
-    pub prompt_file: String,
     /// Identity/personality nodes as (name, content) pairs.
     pub context_parts: Vec<(String, String)>,
     pub session_dir: PathBuf,
@@ -425,37 +348,22 @@ pub struct ResolvedModel {
     pub api_base: String,
     pub api_key: String,
     pub model_id: String,
-    pub prompt_file: String,
     pub context_window: Option<usize>,
 }
 
 impl AppConfig {
-    /// Resolve the active backend and assemble prompts into a SessionConfig.
+    /// Resolve the active model and assemble prompts into a SessionConfig.
     pub async fn resolve(&self, cli: &crate::user::CliArgs) -> Result<SessionConfig> {
-        let (api_base, api_key, model, prompt_file);
-
-        if !self.models.is_empty() {
-            let model_name = cli.model.as_deref().unwrap_or(&self.default_model);
-            let resolved = self.resolve_model(model_name)?;
-            api_base = resolved.api_base;
-            api_key = resolved.api_key;
-            model = resolved.model_id;
-            prompt_file = resolved.prompt_file;
-        } else {
-            let (base, key, mdl) = match self.backend.as_str() {
-                "anthropic" => self.anthropic.resolve("https://api.anthropic.com"),
-                _ => self.openrouter.resolve("https://openrouter.ai/api/v1"),
-            }?;
-            api_base = base;
-            api_key = key;
-            model = mdl;
-            prompt_file = if self.backend == "anthropic" {
-                self.prompts.anthropic.clone()
-            } else {
-                self.prompts.other.clone()
-            };
        }
+        if self.models.is_empty() {
+            anyhow::bail!(
+                "no models configured in {}.
Add a `models` section with at least one entry.", + config_path().display() + ); } + let model_name = cli.model.as_deref().unwrap_or(&self.default_model); + let resolved = self.resolve_model(model_name)?; + let personality_nodes = get().personality_nodes.clone(); let context_parts = crate::mind::identity::personality_nodes(&personality_nodes).await; @@ -465,11 +373,13 @@ impl AppConfig { std::fs::create_dir_all(&session_dir).ok(); // CLI --api-base and --api-key override everything - let api_base = cli.api_base.clone().unwrap_or(api_base); - let api_key = cli.api_key.clone().unwrap_or(api_key); + let api_base = cli.api_base.clone().unwrap_or(resolved.api_base); + let api_key = cli.api_key.clone().unwrap_or(resolved.api_key); Ok(SessionConfig { - api_base, api_key, model, prompt_file, + api_base, + api_key, + model: resolved.model_id, context_parts, session_dir, app: self.clone(), @@ -486,39 +396,18 @@ impl AppConfig { self.model_names().join(", "), ))?; - let (api_base, api_key) = match model.backend.as_str() { - "anthropic" => ( - self.anthropic.base_url.clone() - .unwrap_or_else(|| "https://api.anthropic.com".to_string()), - self.anthropic.api_key.clone(), - ), - "deepinfra" => ( - self.deepinfra.base_url.clone() - .unwrap_or_else(|| "https://api.deepinfra.com/v1/openai".to_string()), - self.deepinfra.api_key.clone(), - ), - _ => ( - self.openrouter.base_url.clone() - .unwrap_or_else(|| "https://openrouter.ai/api/v1".to_string()), - self.openrouter.api_key.clone(), - ), - }; - - let prompt_file = model.prompt_file.clone() - .unwrap_or_else(|| { - if model.backend == "anthropic" { - self.prompts.anthropic.clone() - } else { - self.prompts.other.clone() - } - }); + let api_base = self.backend.base_url.clone() + .ok_or_else(|| anyhow::anyhow!( + "backend.base_url not set in {}", + config_path().display() + ))?; + let api_key = self.backend.api_key.clone(); Ok(ResolvedModel { name: name.to_string(), api_base, api_key, model_id: model.model_id.clone(), - prompt_file, context_window: model.context_window, }) } @@ -567,11 +456,8 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment { let mut f = Figment::from(Serialized::defaults(AppConfig::default())) .merge(Json5File(config_path())); - merge_opt!(f, cli.backend, "backend"); - merge_opt!(f, cli.model, "anthropic.model", "openrouter.model"); - merge_opt!(f, cli.api_key, "anthropic.api_key", "openrouter.api_key"); - merge_opt!(f, cli.api_base, "anthropic.base_url", "openrouter.base_url"); - merge_opt!(f, cli.memory_project, "memory_project"); + merge_opt!(f, cli.api_key, "backend.api_key"); + merge_opt!(f, cli.api_base, "backend.base_url"); merge_opt!(f, cli.dmn_max_turns, "dmn.max_turns"); if cli.debug { f = f.merge(Serialized::default("debug", true)); @@ -646,37 +532,23 @@ pub fn show_config(app: &AppConfig, figment: &Figment) { } println!("# Effective configuration\n"); - println!("backend: {:?} ({})", app.backend, src(figment, "backend")); - for (name, b) in [("anthropic", &app.anthropic), ("openrouter", &app.openrouter)] { - println!("\n{}:", name); - println!(" api_key: {} ({})", mask(&b.api_key), src(figment, &format!("{name}.api_key"))); - println!(" model: {:?} ({})", b.model, src(figment, &format!("{name}.model"))); - if let Some(ref url) = b.base_url { - println!(" base_url: {:?} ({})", url, src(figment, &format!("{name}.base_url"))); - } + println!("backend:"); + println!(" api_key: {} ({})", mask(&app.backend.api_key), src(figment, "backend.api_key")); + if let Some(ref url) = app.backend.base_url { + println!(" base_url: {:?} 
({})", url, src(figment, "backend.base_url")); } - println!("\nprompts:"); - println!(" anthropic: {:?} ({})", app.prompts.anthropic, src(figment, "prompts.anthropic")); - println!(" other: {:?} ({})", app.prompts.other, src(figment, "prompts.other")); println!("\ndebug: {} ({})", app.debug, src(figment, "debug")); println!("\ncompaction:"); println!(" hard_threshold_pct: {} ({})", app.compaction.hard_threshold_pct, src(figment, "compaction.hard_threshold_pct")); println!(" soft_threshold_pct: {} ({})", app.compaction.soft_threshold_pct, src(figment, "compaction.soft_threshold_pct")); println!("\ndmn:"); println!(" max_turns: {} ({})", app.dmn.max_turns, src(figment, "dmn.max_turns")); - if let Some(ref p) = app.memory_project { - println!("\nmemory_project: {:?} ({})", p, src(figment, "memory_project")); - } println!("\ndefault_model: {:?}", app.default_model); if !app.models.is_empty() { println!("\nmodels:"); for (name, m) in &app.models { println!(" {}:", name); - println!(" backend: {:?}", m.backend); println!(" model_id: {:?}", m.model_id); - if let Some(ref pf) = m.prompt_file { - println!(" prompt_file: {:?}", pf); - } if let Some(cw) = m.context_window { println!(" context_window: {}", cw); } diff --git a/src/mind/mod.rs b/src/mind/mod.rs index 53b76e5..11d45b1 100644 --- a/src/mind/mod.rs +++ b/src/mind/mod.rs @@ -354,7 +354,6 @@ impl Mind { client, config.context_parts.clone(), config.app.clone(), - config.prompt_file.clone(), conversation_log, crate::agent::tools::ActiveTools::new(), crate::agent::tools::tools(), diff --git a/src/mind/unconscious.rs b/src/mind/unconscious.rs index 8989264..d8a6aad 100644 --- a/src/mind/unconscious.rs +++ b/src/mind/unconscious.rs @@ -300,7 +300,7 @@ pub async fn prepare_spawn(name: &str, mut auto: AutoAgent, wake: std::sync::Arc let client = crate::agent::api::ApiClient::new(base_url, api_key, model); let agent = crate::agent::Agent::new( client, Vec::new(), - app, String::new(), None, + app, None, crate::agent::tools::ActiveTools::new(), auto.tools.clone(), ).await; diff --git a/src/user/chat.rs b/src/user/chat.rs index a94e039..47c5d56 100644 --- a/src/user/chat.rs +++ b/src/user/chat.rs @@ -112,13 +112,7 @@ pub async fn cmd_switch_model( let _new_client = crate::agent::api::ApiClient::new( &resolved.api_base, &resolved.api_key, &resolved.model_id, ); - let prompt_changed = resolved.prompt_file != agent.prompt_file; - if prompt_changed { - agent.compact().await; - agent.state.lock().await.notify(format!("switched to {} (recompacted)", resolved.model_id)); - } else { - agent.state.lock().await.notify(format!("switched to {}", resolved.model_id)); - } + agent.state.lock().await.notify(format!("switched to {}", resolved.model_id)); } fn notify_help(agent: &std::sync::Arc) { diff --git a/src/user/context.rs b/src/user/context.rs index 4cfa78d..17660b5 100644 --- a/src/user/context.rs +++ b/src/user/context.rs @@ -126,14 +126,7 @@ impl ScreenView for ConsciousScreen { let section_style = Style::default().fg(Color::Yellow); lines.push(Line::styled("── Model ──", section_style)); - let model_display = app.context_info.as_ref() - .map_or_else(|| app.status.model.clone(), |i| i.model.clone()); - lines.push(Line::raw(format!(" Current: {}", model_display))); - if let Some(ref info) = app.context_info { - lines.push(Line::raw(format!(" Backend: {}", info.backend))); - lines.push(Line::raw(format!(" Prompt: {}", info.prompt_file))); - lines.push(Line::raw(format!(" Available: {}", info.available_models.join(", ")))); - } + lines.push(Line::raw(format!(" 
Current: {}", app.status.model))); lines.push(Line::raw("")); lines.push(Line::styled("── Context State ──", section_style)); @@ -153,8 +146,6 @@ impl ScreenView for ConsciousScreen { lines.push(Line::raw(format!(" {:53} {:>6} tokens", "────────", "──────"))); lines.push(Line::raw(format!(" {:53} {:>6} tokens", "Total", total))); - } else if let Some(ref info) = app.context_info { - lines.push(Line::raw(format!(" Context message: {:>6} chars", info.context_message_chars))); } lines.push(Line::raw("")); diff --git a/src/user/mod.rs b/src/user/mod.rs index 18c33e7..9d33f11 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -45,15 +45,6 @@ struct StatusInfo { } /// Context loading details for the debug screen. -#[derive(Debug, Clone)] -struct ContextInfo { - model: String, - available_models: Vec, - prompt_file: String, - backend: String, - context_message_chars: usize, -} - /// Build the screen legend from screen labels. fn screen_legend_from(screens: &[Box]) -> String { let parts: Vec = screens.iter().enumerate() @@ -110,7 +101,6 @@ struct App { top_k: u32, agent: std::sync::Arc, should_quit: bool, - context_info: Option, agent_state: Vec, unconscious_state: Vec, mind_state: Option, @@ -145,7 +135,6 @@ impl App { top_k: 20, agent, should_quit: false, - context_info: None, agent_state: Vec::new(), unconscious_state: Vec::new(), mind_state: None, @@ -609,16 +598,11 @@ async fn run( // --- CLI --- use clap::{Parser, Subcommand}; -use std::path::PathBuf; #[derive(Parser, Debug, Default)] #[command(name = "consciousness", about = "Substrate-independent AI agent")] pub struct CliArgs { - /// Select active backend ("anthropic" or "openrouter") - #[arg(long)] - pub backend: Option, - - /// Model override + /// Model override (selects a named entry from `models` in config.json5) #[arg(short, long)] pub model: Option, @@ -638,10 +622,6 @@ pub struct CliArgs { #[arg(long)] pub show_config: bool, - /// Project memory directory - #[arg(long)] - pub memory_project: Option, - /// Max consecutive DMN turns #[arg(long)] pub dmn_max_turns: Option, From 3e053316083f6b9fb5b70bd0de71df8f7f28627e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 15:49:53 -0400 Subject: [PATCH 27/94] config: merge ModelConfig into BackendConfig, keyed by name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AppConfig had one BackendConfig for credentials and a separate HashMap for named model entries. In practice each named model was always paired with exactly one backend's credentials — the split bought nothing except an extra struct and the awkward two-lookup shape in resolve_model (find model → get backend creds → combine). Merge them: BackendConfig now carries api_key, base_url, model_id, and context_window. AppConfig has a single HashMap backends map and a default_backend name. resolve_model is one lookup. ModelConfig struct deleted. default_model renamed to default_backend. Config shape changes from backend: { api_key, base_url } models: { "27b": { model_id, context_window } } default_model: "27b" to backends: { "27b": { api_key, base_url, model_id, context_window } } default_backend: "27b" Updated ~/.consciousness/config.json5 to match. One small side effect: dropped the --api-key / --api-base figment merge-opts for "backend.*" targets — those would need to know which backend to target now and there's no sensible default. The CLI flags still function as post-resolution overrides on the eventual SessionConfig. 
Co-Authored-By: Proof of Concept --- src/config.rs | 96 ++++++++++++++++++++++++--------------------------- 1 file changed, 45 insertions(+), 51 deletions(-) diff --git a/src/config.rs b/src/config.rs index 1d5c2c3..39c69ab 100644 --- a/src/config.rs +++ b/src/config.rs @@ -219,19 +219,19 @@ pub fn reload() -> bool { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AppConfig { - /// Credentials for the single model backend. + /// Named model endpoints — credentials, base URL, and model id bundled + /// into one entry per backend. Keyed by name, selected by + /// `default_backend` or by `--model ` on the CLI. #[serde(default)] - pub backend: BackendConfig, + pub backends: HashMap, + #[serde(default)] + pub default_backend: String, pub debug: bool, pub compaction: CompactionConfig, pub dmn: DmnConfig, #[serde(default)] pub learn: LearnConfig, #[serde(default)] - pub models: HashMap, - #[serde(default = "default_model_name")] - pub default_model: String, - #[serde(default)] pub mcp_servers: Vec, #[serde(default)] pub lsp_servers: Vec, @@ -257,10 +257,17 @@ pub struct LspServerConfig { #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct BackendConfig { + /// API key for the backend. #[serde(default)] pub api_key: String, + /// Base URL for the backend's OpenAI-compatible endpoint. #[serde(default, skip_serializing_if = "Option::is_none")] pub base_url: Option, + /// Model identifier sent to the API. + pub model_id: String, + /// Context window size in tokens. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub context_window: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -298,19 +305,11 @@ impl Default for LearnConfig { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ModelConfig { - /// Model identifier sent to the API. - pub model_id: String, - /// Context window size in tokens. - #[serde(default)] - pub context_window: Option, -} - impl Default for AppConfig { fn default() -> Self { Self { - backend: BackendConfig::default(), + backends: HashMap::new(), + default_backend: String::new(), debug: false, compaction: CompactionConfig { hard_threshold_pct: 90, @@ -318,16 +317,12 @@ impl Default for AppConfig { }, dmn: DmnConfig { max_turns: 20 }, learn: LearnConfig::default(), - models: HashMap::new(), - default_model: String::new(), mcp_servers: Vec::new(), lsp_servers: Vec::new(), } } } -fn default_model_name() -> String { String::new() } - /// Resolved, ready-to-use agent session config. pub struct SessionConfig { pub api_base: String, @@ -352,17 +347,17 @@ pub struct ResolvedModel { } impl AppConfig { - /// Resolve the active model and assemble prompts into a SessionConfig. + /// Resolve the active backend and assemble prompts into a SessionConfig. pub async fn resolve(&self, cli: &crate::user::CliArgs) -> Result { - if self.models.is_empty() { + if self.backends.is_empty() { anyhow::bail!( - "no models configured in {}. Add a `models` section with at least one entry.", + "no backends configured in {}. 
Add a `backends` section with at least one entry.", config_path().display() ); } - let model_name = cli.model.as_deref().unwrap_or(&self.default_model); - let resolved = self.resolve_model(model_name)?; + let name = cli.model.as_deref().unwrap_or(&self.default_backend); + let resolved = self.resolve_model(name)?; let personality_nodes = get().personality_nodes.clone(); let context_parts = crate::mind::identity::personality_nodes(&personality_nodes).await; @@ -387,34 +382,33 @@ impl AppConfig { }) } - /// Look up a named model and resolve its credentials from the backend config. + /// Look up a named backend and resolve its credentials. pub fn resolve_model(&self, name: &str) -> Result { - let model = self.models.get(name) + let b = self.backends.get(name) .ok_or_else(|| anyhow::anyhow!( - "Unknown model '{}'. Available: {}", + "Unknown backend '{}'. Available: {}", name, self.model_names().join(", "), ))?; - let api_base = self.backend.base_url.clone() + let api_base = b.base_url.clone() .ok_or_else(|| anyhow::anyhow!( - "backend.base_url not set in {}", - config_path().display() + "backends.{}.base_url not set in {}", + name, config_path().display() ))?; - let api_key = self.backend.api_key.clone(); Ok(ResolvedModel { name: name.to_string(), api_base, - api_key, - model_id: model.model_id.clone(), - context_window: model.context_window, + api_key: b.api_key.clone(), + model_id: b.model_id.clone(), + context_window: b.context_window, }) } - /// List available model names, sorted. + /// List available backend names, sorted. pub fn model_names(&self) -> Vec { - let mut names: Vec<_> = self.models.keys().cloned().collect(); + let mut names: Vec<_> = self.backends.keys().cloned().collect(); names.sort(); names } @@ -456,8 +450,6 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment { let mut f = Figment::from(Serialized::defaults(AppConfig::default())) .merge(Json5File(config_path())); - merge_opt!(f, cli.api_key, "backend.api_key"); - merge_opt!(f, cli.api_base, "backend.base_url"); merge_opt!(f, cli.dmn_max_turns, "dmn.max_turns"); if cli.debug { f = f.merge(Serialized::default("debug", true)); @@ -532,24 +524,26 @@ pub fn show_config(app: &AppConfig, figment: &Figment) { } println!("# Effective configuration\n"); - println!("backend:"); - println!(" api_key: {} ({})", mask(&app.backend.api_key), src(figment, "backend.api_key")); - if let Some(ref url) = app.backend.base_url { - println!(" base_url: {:?} ({})", url, src(figment, "backend.base_url")); - } - println!("\ndebug: {} ({})", app.debug, src(figment, "debug")); + println!("debug: {} ({})", app.debug, src(figment, "debug")); println!("\ncompaction:"); println!(" hard_threshold_pct: {} ({})", app.compaction.hard_threshold_pct, src(figment, "compaction.hard_threshold_pct")); println!(" soft_threshold_pct: {} ({})", app.compaction.soft_threshold_pct, src(figment, "compaction.soft_threshold_pct")); println!("\ndmn:"); println!(" max_turns: {} ({})", app.dmn.max_turns, src(figment, "dmn.max_turns")); - println!("\ndefault_model: {:?}", app.default_model); - if !app.models.is_empty() { - println!("\nmodels:"); - for (name, m) in &app.models { + println!("\ndefault_backend: {:?} ({})", app.default_backend, src(figment, "default_backend")); + if !app.backends.is_empty() { + println!("\nbackends:"); + let mut names: Vec<_> = app.backends.keys().cloned().collect(); + names.sort(); + for name in names { + let b = &app.backends[&name]; println!(" {}:", name); - println!(" model_id: {:?}", m.model_id); - if let Some(cw) = m.context_window { + 
println!(" api_key: {} ({})", mask(&b.api_key), src(figment, &format!("backends.{name}.api_key"))); + if let Some(ref url) = b.base_url { + println!(" base_url: {:?} ({})", url, src(figment, &format!("backends.{name}.base_url"))); + } + println!(" model_id: {:?}", b.model_id); + if let Some(cw) = b.context_window { println!(" context_window: {}", cw); } } From 28484a385b8dfc059f5005d17cd0e72505f5f76a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 15:56:06 -0400 Subject: [PATCH 28/94] config: drop dead fields from Config (memory section) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Four Config fields had no external readers, left over from earlier features that got refactored away: - journal_days, journal_max — journal rotation knobs that nothing actually consults - prompts_dir — the old per-prompt-file directory, obsolete since prompt_file metadata itself went away in a prior cleanup - api_reasoning — a reasoning-mode string that used to flow into the API request, superseded by per-agent reasoning_effort on AgentState All four were only ever assigned to and never read. Drop them from the struct, Default impl, and (as appropriate) deserialization defaults. Co-Authored-By: Proof of Concept --- src/config.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index 39c69ab..4f50947 100644 --- a/src/config.rs +++ b/src/config.rs @@ -58,12 +58,8 @@ pub struct Config { /// Nodes loaded into subconscious agent context #[serde(default)] pub agent_nodes: Vec, - pub journal_days: u32, - pub journal_max: usize, pub llm_concurrency: usize, pub agent_budget: usize, - #[serde(deserialize_with = "deserialize_path")] - pub prompts_dir: PathBuf, /// Resolved from agent_model → models → backend (not in config directly) #[serde(skip)] pub api_base_url: Option, @@ -85,7 +81,6 @@ pub struct Config { /// Number of assistant responses to score per memory. Default: 50. 
#[serde(default = "default_scoring_response_window")] pub scoring_response_window: usize, - pub api_reasoning: String, pub agent_types: Vec, #[serde(default)] pub mcp_servers: Vec, @@ -118,11 +113,8 @@ impl Default for Config { protected_nodes: Vec::new(), personality_nodes: vec!["identity".into(), "core-practices".into()], agent_nodes: vec!["identity".into(), "core-practices".into()], - journal_days: 7, - journal_max: 20, llm_concurrency: 1, agent_budget: 1000, - prompts_dir: home.join(".consciousness/prompts"), api_base_url: None, api_key: None, api_model: None, @@ -131,7 +123,6 @@ impl Default for Config { scoring_interval_secs: default_scoring_interval_secs(), scoring_response_window: default_scoring_response_window(), agent_model: None, - api_reasoning: "high".to_string(), agent_types: vec![ "linker".into(), "organize".into(), "distill".into(), "separator".into(), "split".into(), From 60de5793054e1dbf93f8cab38dd69415f4674910 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 16:02:43 -0400 Subject: [PATCH 29/94] config: unify subconscious API resolution with the main chat path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two parallel backend-resolution paths had drifted apart: - Main chat: AppConfig::resolve_model() → a named BackendConfig in AppConfig.backends - Subconscious / oneshot / context_window(): four skip-serde "cache" fields on Config (memory section) — api_base_url, api_key, api_model, api_context_window — that used to be populated at Config::try_load_shared time by walking memory.agent_model → root.models[name] → root[backend_name] When we renamed `models` to `backends` and collapsed ModelConfig into BackendConfig, the latter chain started silently dereferencing `root.get("models")` → None → no population. Subconscious agents fell through the "API not configured" guard; context_window() started returning 0 (since api_context_window default is u64's 0 now that we don't populate it). It was only visibly working for the main chat. Collapse to one path: - Drop Config.agent_model (duplicate of AppConfig.default_backend) - Drop Config.{api_base_url, api_key, api_model, api_context_window} — no longer populated, no longer needed - Drop default_context_window() — nobody reads the field anymore - Drop the memory-side resolution block in try_load_shared() - Subconscious (mind/unconscious.rs) and oneshot (agent/oneshot.rs) now call load_app() + resolve_model(&app.default_backend) just like the main chat does - context_window() reads from config::app().backends[default_backend] .context_window, defaulting to 128k only if the backend doesn't specify one Side effect: Kent's config file drops agent_model, api_reasoning, journal_days, journal_max — all fields whose Rust counterparts are now gone. (Figment tolerates unknown fields, so leaving them wouldn't have broken anything, but they were lying about what's configurable.) 
Co-Authored-By: Proof of Concept --- src/agent/context.rs | 5 ++++- src/agent/oneshot.rs | 15 +++++---------- src/config.rs | 38 +------------------------------------- src/mind/unconscious.rs | 23 +++++++++++------------ 4 files changed, 21 insertions(+), 60 deletions(-) diff --git a/src/agent/context.rs b/src/agent/context.rs index cc8044a..5b51c24 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -992,7 +992,10 @@ impl ContextState { } pub fn context_window() -> usize { - crate::config::get().api_context_window + let app = crate::config::app(); + app.backends.get(&app.default_backend) + .and_then(|b| b.context_window) + .unwrap_or(128_000) } pub fn context_budget_tokens() -> usize { diff --git a/src/agent/oneshot.rs b/src/agent/oneshot.rs index 588a786..1c5ac90 100644 --- a/src/agent/oneshot.rs +++ b/src/agent/oneshot.rs @@ -247,19 +247,14 @@ impl AutoAgent { &mut self, bail_fn: Option<&(dyn Fn(usize) -> Result<(), String> + Sync)>, ) -> Result<(), String> { - let config = crate::config::get(); - let base_url = config.api_base_url.as_deref().unwrap_or(""); - let api_key = config.api_key.as_deref().unwrap_or(""); - let model = config.api_model.as_deref().unwrap_or(""); - if base_url.is_empty() || model.is_empty() { - return Err("API not configured (no base_url or model)".to_string()); - } - let client = super::api::ApiClient::new(base_url, api_key, model); - - // Load system prompt + identity from config + // Load system prompt + identity from config. let cli = crate::user::CliArgs::default(); let (app, _) = crate::config::load_app(&cli) .map_err(|e| format!("config: {}", e))?; + let resolved = app.resolve_model(&app.default_backend) + .map_err(|e| format!("API not configured: {}", e))?; + let client = super::api::ApiClient::new( + &resolved.api_base, &resolved.api_key, &resolved.model_id); let personality = crate::config::reload_context() .await.map_err(|e| format!("config: {}", e))?; diff --git a/src/config.rs b/src/config.rs index 4f50947..5b1726b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -26,7 +26,6 @@ pub fn config_path() -> PathBuf { static CONFIG: OnceLock>> = OnceLock::new(); -fn default_context_window() -> usize { 128_000 } fn default_stream_timeout() -> u64 { 60 } fn default_scoring_interval_secs() -> u64 { 3600 } // 1 hour fn default_scoring_response_window() -> usize { 100 } @@ -60,18 +59,6 @@ pub struct Config { pub agent_nodes: Vec, pub llm_concurrency: usize, pub agent_budget: usize, - /// Resolved from agent_model → models → backend (not in config directly) - #[serde(skip)] - pub api_base_url: Option, - #[serde(skip)] - pub api_key: Option, - #[serde(skip)] - pub api_model: Option, - #[serde(skip, default = "default_context_window")] - pub api_context_window: usize, - /// Used to resolve API settings, not stored on Config - #[serde(default)] - agent_model: Option, /// Stream chunk timeout in seconds (no data = timeout). 
#[serde(default = "default_stream_timeout")] pub api_stream_timeout_secs: u64, @@ -115,14 +102,9 @@ impl Default for Config { agent_nodes: vec!["identity".into(), "core-practices".into()], llm_concurrency: 1, agent_budget: 1000, - api_base_url: None, - api_key: None, - api_model: None, - api_context_window: default_context_window(), api_stream_timeout_secs: default_stream_timeout(), scoring_interval_secs: default_scoring_interval_secs(), scoring_response_window: default_scoring_response_window(), - agent_model: None, agent_types: vec![ "linker".into(), "organize".into(), "distill".into(), "separator".into(), "split".into(), @@ -153,25 +135,7 @@ impl Config { let mut config: Config = serde_json::from_value(mem_value.clone()).ok()?; config.llm_concurrency = config.llm_concurrency.max(1); - // Resolve API settings: agent_model → models → backend - if let Some(model_name) = &config.agent_model - && let Some(model_cfg) = root.get("models").and_then(|m| m.get(model_name.as_str())) { - let backend_name = model_cfg.get("backend").and_then(|v| v.as_str()).unwrap_or(""); - let model_id = model_cfg.get("model_id").and_then(|v| v.as_str()).unwrap_or(""); - - if let Some(backend) = root.get(backend_name) { - config.api_base_url = backend.get("base_url") - .and_then(|v| v.as_str()).map(String::from); - config.api_key = backend.get("api_key") - .and_then(|v| v.as_str()).map(String::from); - } - config.api_model = Some(model_id.to_string()); - if let Some(cw) = model_cfg.get("context_window").and_then(|v| v.as_u64()) { - config.api_context_window = cw as usize; - } - } - - // Top-level config sections (not inside "memory") + // Top-level sections (not inside "memory"). if let Some(servers) = root.get("lsp_servers") { config.lsp_servers = serde_json::from_value(servers.clone()).unwrap_or_default(); } diff --git a/src/mind/unconscious.rs b/src/mind/unconscious.rs index d8a6aad..4f9a0ca 100644 --- a/src/mind/unconscious.rs +++ b/src/mind/unconscious.rs @@ -275,17 +275,7 @@ pub async fn prepare_spawn(name: &str, mut auto: AutoAgent, wake: std::sync::Arc phase: s.phase.clone(), }).collect()); - // Create standalone Agent — stored so UI can read context - let config = crate::config::get(); - let base_url = config.api_base_url.as_deref().unwrap_or(""); - let api_key = config.api_key.as_deref().unwrap_or(""); - let model = config.api_model.as_deref().unwrap_or(""); - if base_url.is_empty() || model.is_empty() { - dbglog!("[unconscious] API not configured"); - auto.steps = orig_steps; - return Err(auto); - } - + // Create standalone Agent — stored so UI can read context. let cli = crate::user::CliArgs::default(); let (app, _) = match crate::config::load_app(&cli) { Ok(r) => r, @@ -295,9 +285,18 @@ pub async fn prepare_spawn(name: &str, mut auto: AutoAgent, wake: std::sync::Arc return Err(auto); } }; + let resolved = match app.resolve_model(&app.default_backend) { + Ok(r) => r, + Err(e) => { + dbglog!("[unconscious] API not configured: {}", e); + auto.steps = orig_steps; + return Err(auto); + } + }; // Unconscious agents have self-contained prompts — no standard context. 
- let client = crate::agent::api::ApiClient::new(base_url, api_key, model); + let client = crate::agent::api::ApiClient::new( + &resolved.api_base, &resolved.api_key, &resolved.model_id); let agent = crate::agent::Agent::new( client, Vec::new(), app, None, From 18b7fd05353adb866411d932f3023974b78dd18c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 16:08:20 -0400 Subject: [PATCH 30/94] scoring: drop dead Elo/agent_budget block in consolidation_plan MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The graph-health logic in consolidation_plan_inner computed reasonable agent counts based on graph metrics (α, Gini, hub dominance), then immediately overwrote them with an Elo-weighted flat-budget distribution, or — if no agent-elo.json existed — with a simple budget/N per type. Nothing in the codebase writes agent-elo.json; it's external state that never gets maintained. So the effective behavior was always the "No Elo ratings — equal distribution" branch, which just bucketed agent_budget evenly across active agent types and discarded everything the graph analysis had just decided. Keep the graph-health allocation (α → linker count, Gini → distill bump, organize/distill/split proportional). Drop: - The entire Elo / agent_budget block at the end of consolidation_plan_inner - Config.agent_budget field and its default (1000) - agent_budget: 40 from Kent's config.json5 - The local agent_types binding inside the function — it was only used by the now-deleted block. Config.agent_types stays; it has other consumers. Co-Authored-By: Proof of Concept --- src/config.rs | 2 -- src/hippocampus/neuro/scoring.rs | 46 -------------------------------- 2 files changed, 48 deletions(-) diff --git a/src/config.rs b/src/config.rs index 5b1726b..07c07a3 100644 --- a/src/config.rs +++ b/src/config.rs @@ -58,7 +58,6 @@ pub struct Config { #[serde(default)] pub agent_nodes: Vec, pub llm_concurrency: usize, - pub agent_budget: usize, /// Stream chunk timeout in seconds (no data = timeout). 
#[serde(default = "default_stream_timeout")] pub api_stream_timeout_secs: u64, @@ -101,7 +100,6 @@ impl Default for Config { personality_nodes: vec!["identity".into(), "core-practices".into()], agent_nodes: vec!["identity".into(), "core-practices".into()], llm_concurrency: 1, - agent_budget: 1000, api_stream_timeout_secs: default_stream_timeout(), scoring_interval_secs: default_scoring_interval_secs(), scoring_response_window: default_scoring_response_window(), diff --git a/src/hippocampus/neuro/scoring.rs b/src/hippocampus/neuro/scoring.rs index 5828fd0..c9cbb40 100644 --- a/src/hippocampus/neuro/scoring.rs +++ b/src/hippocampus/neuro/scoring.rs @@ -230,10 +230,6 @@ fn consolidation_plan_inner(store: &Store, _detect_interf: bool) -> Consolidatio rationale: Vec::new(), }; - // Active agent types from config - let config = crate::config::get(); - let agent_types: Vec<&str> = config.agent_types.iter().map(|s| s.as_str()).collect(); - // Target: α ≥ 2.5 (healthy scale-free) if alpha < 2.0 { plan.add("linker", 100); @@ -274,48 +270,6 @@ fn consolidation_plan_inner(store: &Store, _detect_interf: bool) -> Consolidatio // Split: handle oversized nodes plan.set("split", 5); - // Distribute agent budget using Elo ratings - let budget = crate::config::get().agent_budget; - let elo_path = crate::config::get().data_dir.join("agent-elo.json"); - if let Ok(elo_json) = std::fs::read_to_string(&elo_path) { - if let Ok(ratings) = serde_json::from_str::>(&elo_json) { - let elos: Vec = agent_types.iter() - .map(|t| ratings.get(*t).copied().unwrap_or(1000.0)) - .collect(); - let min_elo = elos.iter().copied().fold(f64::MAX, f64::min); - - let weights: Vec = elos.iter() - .map(|e| { - let shifted = e - min_elo + 50.0; - shifted * shifted - }) - .collect(); - let total_weight: f64 = weights.iter().sum(); - - let allocate = |w: f64| -> usize { - ((w / total_weight * budget as f64).round() as usize).max(2) - }; - - for (i, agent) in agent_types.iter().enumerate() { - plan.set(agent, allocate(weights[i])); - } - - let summary: Vec = agent_types.iter() - .map(|a| format!("{}={}", a, plan.count(a))) - .collect(); - plan.rationale.push(format!( - "Elo allocation (budget={}): {}", budget, summary.join(" "))); - } - } else { - // No Elo file — use budget with equal distribution - let per_type = budget / agent_types.len(); - for agent in &agent_types { - plan.set(agent, per_type); - } - plan.rationale.push(format!( - "No Elo ratings — equal distribution ({} each, budget={})", per_type, budget)); - } - plan } From dd551fe5512f040e7ebe1c0287a80352e8952526 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 16:14:43 -0400 Subject: [PATCH 31/94] config: watch config.json5 with inotify, reload live on change MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both config halves (Config for the memory section, AppConfig globally) are now reloaded whenever ~/.consciousness/config.json5 changes on disk. So edits from vim, manual tweaks, or F6's own config_writer calls all land without a restart. No more "reload the daemon to pick up a config change." Wires up the previously-unused Config::reload() (Kent flagged it as "not dead, just not wired"). Pairs it with an AppConfig reload via install_app(). Both run on the same file-change event. Implementation: - notify-debouncer-mini watches the config file's parent directory (editors usually replace-via-rename, so watching the file itself misses the new inode). 
Debounced at 200ms to coalesce the flurry of events editors produce around a single save. - Filter for events whose path is the actual config file. - On match: call reload() for Config, run build_figment + extract for AppConfig. If AppConfig parsing fails (editor mid-save with partial content), log and keep the old cached value. - Watcher runs in its own named thread, fire-and-forget. If startup fails we just log and move on — worst case is no live reload, not a crash. CliArgs + SubCmd both get Clone derives so the watcher can own a snapshot of the startup args for future reloads. Watcher is kicked off in user/mod.rs:start() right after load_session. Co-Authored-By: Proof of Concept --- Cargo.lock | 181 +++++++++++++++++++++++++++++++++++++++++++++--- Cargo.toml | 1 + src/config.rs | 57 +++++++++++++++ src/user/mod.rs | 7 +- 4 files changed, 235 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd4b79f..dfca607 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -497,6 +497,7 @@ dependencies = [ "log", "memchr", "memmap2", + "notify-debouncer-mini", "paste", "peg", "ratatui", @@ -1088,6 +1089,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + [[package]] name = "futures" version = "0.3.32" @@ -1453,6 +1463,26 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" +[[package]] +name = "inotify" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd5b3eaf1a28b758ac0faa5a4254e8ab2705605496f1b1f3fbbc3988ad73d199" +dependencies = [ + "bitflags 2.11.0", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + [[package]] name = "instability" version = "0.3.12" @@ -1562,6 +1592,26 @@ dependencies = [ "thiserror 2.0.18", ] +[[package]] +name = "kqueue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "lab" version = "0.11.0" @@ -1784,6 +1834,45 @@ dependencies = [ "memchr", ] +[[package]] +name = "notify" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" +dependencies = [ + "bitflags 2.11.0", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio", + "notify-types", + "walkdir", + "windows-sys 0.60.2", +] + +[[package]] +name = "notify-debouncer-mini" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17849edfaabd9a5fef1c606d99cfc615a8e99f7ac4366406d86c7942a3184cf2" +dependencies = [ + 
"log", + "notify", + "notify-types", + "tempfile", +] + +[[package]] +name = "notify-types" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a" +dependencies = [ + "bitflags 2.11.0", +] + [[package]] name = "num-conv" version = "0.2.1" @@ -3810,7 +3899,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", ] [[package]] @@ -3828,14 +3926,31 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -3844,48 +3959,96 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + [[package]] name = "wit-bindgen" version = "0.51.0" diff --git a/Cargo.toml b/Cargo.toml index ea42bfa..7cdf851 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ log = "0.4" serde = { version = "1", features = ["derive"] } serde_json = "1" json-five = "0.3" +notify-debouncer-mini = "0.7" ratatui = { version = "0.30", features = ["unstable-rendered-line-info"] } tui-markdown = { git = "https://github.com/koverstreet/tui-markdown", subdirectory = "tui-markdown" } diff --git a/src/config.rs b/src/config.rs index 07c07a3..d00d4d7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -166,6 +166,63 @@ pub fn reload() -> bool { changed } +/// Spawn a background thread that watches `~/.consciousness/config.json5` +/// and reloads both the memory Config and the global AppConfig whenever +/// the file changes on disk. Lets edits from vim / F6 hotkeys / manual +/// tweaks land live without restarting the process. +pub fn watch_config(cli: crate::user::CliArgs) { + use notify_debouncer_mini::{new_debouncer, notify::RecursiveMode}; + + let path = config_path(); + // Watch the parent directory — editors often replace-via-rename, so + // watching the file itself misses the new inode. 
+    let Some(parent) = path.parent().map(|p| p.to_path_buf()) else {
+        crate::dbglog!("[config] no parent for {}, skipping watch", path.display());
+        return;
+    };
+
+    std::thread::Builder::new()
+        .name("config-watcher".into())
+        .spawn(move || {
+            let (tx, rx) = std::sync::mpsc::channel();
+            let mut debouncer = match new_debouncer(std::time::Duration::from_millis(200), tx) {
+                Ok(d) => d,
+                Err(e) => {
+                    crate::dbglog!("[config] watcher setup failed: {}", e);
+                    return;
+                }
+            };
+            if let Err(e) = debouncer.watcher()
+                .watch(&parent, RecursiveMode::NonRecursive)
+            {
+                crate::dbglog!("[config] watch({}) failed: {}", parent.display(), e);
+                return;
+            }
+            crate::dbglog!("[config] watching {}", path.display());
+
+            while let Ok(res) = rx.recv() {
+                let Ok(events) = res else { continue; };
+                if !events.iter().any(|e| e.path == path) { continue; }
+
+                // Reload both halves.
+                let mem_changed = reload();
+                let app_changed = match build_figment(&cli).extract::<AppConfig>() {
+                    Ok(app) => {
+                        install_app(app);
+                        true
+                    }
+                    Err(e) => {
+                        crate::dbglog!("[config] reload: AppConfig parse failed: {}", e);
+                        false
+                    }
+                };
+                crate::dbglog!("[config] reloaded (memory_changed={}, app_changed={})",
+                    mem_changed, app_changed);
+            }
+        })
+        .ok();
+}
+
 // ============================================================
 // Agent config (top-level settings)
 // ============================================================
diff --git a/src/user/mod.rs b/src/user/mod.rs
index 9d33f11..93da72c 100644
--- a/src/user/mod.rs
+++ b/src/user/mod.rs
@@ -228,6 +228,9 @@ fn restore_terminal(terminal: &mut ratatui::Terminal Result<()> {
     let (config, _figment) = crate::config::load_session(&cli).await?;
 
+    // Pick up external edits (vim, F6 hotkeys, etc.) without restart.
+    crate::config::watch_config(cli.clone());
+
     if config.app.debug {
         unsafe { std::env::set_var("POC_DEBUG", "1") };
     }
@@ -599,7 +602,7 @@ async fn run(
 
 use clap::{Parser, Subcommand};
 
-#[derive(Parser, Debug, Default)]
+#[derive(Parser, Debug, Default, Clone)]
 #[command(name = "consciousness", about = "Substrate-independent AI agent")]
 pub struct CliArgs {
     /// Model override (selects a named entry from `models` in config.json5)
     #[arg(short, long)]
     pub model: Option<String>,
@@ -634,7 +637,7 @@ pub struct CliArgs {
     pub command: Option<SubCmd>,
 }
 
-#[derive(Subcommand, Debug)]
+#[derive(Subcommand, Debug, Clone)]
 pub enum SubCmd {
     /// Print new output since last read and exit
     Read {

From 592a3e2e52aae0f54f5a80617583fc5d144e04a4 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 16:20:17 -0400
Subject: [PATCH 32/94] config: move user_name/assistant_name to AppConfig (top level)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These are identity settings, not memory-graph settings. Sat inside the
`memory` section only because that's where Config started life. Move to
AppConfig alongside the other top-level stuff. Readers now pull from
`config::app()` instead of `config::get()`.

subconscious/defs.rs's conversation-building pass still needs Config for
surface_conversation_bytes, so both guards coexist there — AppConfig's
guard is dropped before the per-step await loop so we don't stall the
config-watcher's writer.

show_config picks up the two new fields at the top of its output. Kent's
config already has them hoisted to the top level.
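With no keys set, the serde fallbacks give the same values the old Config defaults did, so the hoisted section of config.json5 is just:

    user_name: "User",
    assistant_name: "Assistant",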
Co-Authored-By: Proof of Concept --- src/agent/context.rs | 6 +++--- src/agent/oneshot.rs | 4 ++-- src/cli/node.rs | 2 +- src/config.rs | 17 ++++++++++++----- src/mind/subconscious.rs | 3 ++- src/subconscious/defs.rs | 15 +++++++++------ 6 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/agent/context.rs b/src/agent/context.rs index 5b51c24..37dbf48 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -374,7 +374,7 @@ impl AstNode { /// Short label for the UI. pub fn label(&self) -> String { - let cfg = crate::config::get(); + let app = crate::config::app(); match self { Self::Branch { role, children, .. } => { let preview = children.first() @@ -383,8 +383,8 @@ impl AstNode { .unwrap_or_default(); match role { Role::System => "system".into(), - Role::User => format!("{}: {}", cfg.user_name, preview), - Role::Assistant => format!("{}: {}", cfg.assistant_name, preview), + Role::User => format!("{}: {}", app.user_name, preview), + Role::Assistant => format!("{}: {}", app.assistant_name, preview), } } Self::Leaf(leaf) => match &leaf.body { diff --git a/src/agent/oneshot.rs b/src/agent/oneshot.rs index 1c5ac90..8bc8b53 100644 --- a/src/agent/oneshot.rs +++ b/src/agent/oneshot.rs @@ -183,8 +183,8 @@ fn resolve_prompt( state: &std::collections::BTreeMap, recently_written: &[String], ) -> String { - let cfg = crate::config::get(); - let template = template.replace("{assistant_name}", &cfg.assistant_name); + let template = template.replace("{assistant_name}", + &crate::config::app().assistant_name); let mut result = String::with_capacity(template.len()); let mut rest = template.as_str(); while let Some(start) = rest.find("{{") { diff --git a/src/cli/node.rs b/src/cli/node.rs index 5472505..c4305a7 100644 --- a/src/cli/node.rs +++ b/src/cli/node.rs @@ -197,7 +197,7 @@ pub async fn cmd_load_context(stats: bool) -> Result<()> { return Ok(()); } - println!("=== MEMORY SYSTEM ({}) ===", cfg.assistant_name); + println!("=== MEMORY SYSTEM ({}) ===", crate::config::app().assistant_name); if !personality.is_empty() { println!("--- personality_nodes ({}) ---", personality.len()); diff --git a/src/config.rs b/src/config.rs index d00d4d7..b7ea597 100644 --- a/src/config.rs +++ b/src/config.rs @@ -40,8 +40,6 @@ fn default_identity_dir() -> PathBuf { #[derive(Debug, Clone, Deserialize)] #[serde(default)] pub struct Config { - pub user_name: String, - pub assistant_name: String, #[serde(deserialize_with = "deserialize_path")] pub data_dir: PathBuf, #[serde(default = "default_identity_dir", deserialize_with = "deserialize_path")] @@ -91,8 +89,6 @@ impl Default for Config { fn default() -> Self { let home = dirs::home_dir().unwrap_or_default(); Self { - user_name: "User".to_string(), - assistant_name: "Assistant".to_string(), data_dir: home.join(".consciousness/memory"), identity_dir: home.join(".consciousness/identity"), projects_dir: home.join(".claude/projects"), @@ -229,6 +225,10 @@ pub fn watch_config(cli: crate::user::CliArgs) { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AppConfig { + #[serde(default = "default_user_name")] + pub user_name: String, + #[serde(default = "default_assistant_name")] + pub assistant_name: String, /// Named model endpoints — credentials, base URL, and model id bundled /// into one entry per backend. Keyed by name, selected by /// `default_backend` or by `--model ` on the CLI. 
@@ -315,9 +315,14 @@ impl Default for LearnConfig { } } +fn default_user_name() -> String { "User".into() } +fn default_assistant_name() -> String { "Assistant".into() } + impl Default for AppConfig { fn default() -> Self { Self { + user_name: default_user_name(), + assistant_name: default_assistant_name(), backends: HashMap::new(), default_backend: String::new(), debug: false, @@ -534,7 +539,9 @@ pub fn show_config(app: &AppConfig, figment: &Figment) { } println!("# Effective configuration\n"); - println!("debug: {} ({})", app.debug, src(figment, "debug")); + println!("user_name: {:?} ({})", app.user_name, src(figment, "user_name")); + println!("assistant_name: {:?} ({})", app.assistant_name, src(figment, "assistant_name")); + println!("\ndebug: {} ({})", app.debug, src(figment, "debug")); println!("\ncompaction:"); println!(" hard_threshold_pct: {} ({})", app.compaction.hard_threshold_pct, src(figment, "compaction.hard_threshold_pct")); println!(" soft_threshold_pct: {} ({})", app.compaction.soft_threshold_pct, src(figment, "compaction.soft_threshold_pct")); diff --git a/src/mind/subconscious.rs b/src/mind/subconscious.rs index 15c8b04..21cc549 100644 --- a/src/mind/subconscious.rs +++ b/src/mind/subconscious.rs @@ -92,7 +92,8 @@ impl State { /// Generate the DMN prompt for the current state, informed by /// user presence and error patterns. pub fn prompt(&self, ctx: &DmnContext) -> String { - let user = &crate::config::get().user_name; + let app = crate::config::app(); + let user = &app.user_name; let idle_info = if ctx.user_idle < Duration::from_secs(60) { format!("{} is here (active recently).", user) diff --git a/src/subconscious/defs.rs b/src/subconscious/defs.rs index 8828043..a862c8d 100644 --- a/src/subconscious/defs.rs +++ b/src/subconscious/defs.rs @@ -396,13 +396,14 @@ fn resolve_conversation(budget: Option) -> String { let cfg = crate::config::get(); let max_bytes = budget.unwrap_or_else(|| cfg.surface_conversation_bytes.unwrap_or(100_000)); + let app = crate::config::app(); let mut fragments: Vec = Vec::new(); let mut total_bytes = 0; let mut oldest_ts = String::new(); for (role, content, ts) in iter { if total_bytes >= max_bytes { break; } - let name = if role == "user" { &cfg.user_name } else { &cfg.assistant_name }; + let name = if role == "user" { &app.user_name } else { &app.assistant_name }; let formatted = if !ts.is_empty() { oldest_ts = ts[..ts.floor_char_boundary(ts.len().min(19))].to_string(); format!("**{}** {}: {}", name, &oldest_ts, content) @@ -623,11 +624,13 @@ pub async fn run_agent( let mut all_keys = keys; let mut resolved_steps = Vec::new(); for step in &def.steps { - let cfg = crate::config::get(); - let template = step.prompt - .replace("{agent_name}", &def.agent) - .replace("{user_name}", &cfg.user_name) - .replace("{assistant_name}", &cfg.assistant_name); + let template = { + let app = crate::config::app(); + step.prompt + .replace("{agent_name}", &def.agent) + .replace("{user_name}", &app.user_name) + .replace("{assistant_name}", &app.assistant_name) + }; let (prompt, extra_keys) = resolve_placeholders(&template, &all_keys, count).await; all_keys.extend(extra_keys); resolved_steps.push(super::prompts::ResolvedStep { From 0bf71b91101c644d03721442b4bfea242ff67a7e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 16 Apr 2026 18:00:10 -0400 Subject: [PATCH 33/94] agent: add NodeBody::Image for Qwen3-VL vision input MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Images are rendered as `<|vision_start|>` + 
N × `<|image_pad|>` + `<|vision_end|>` where N is computed from the image dimensions using Qwen3-VL's smart_resize rules (patch_size=16, merge_size=2, min=64K, max=16M pixels). The token count matches what vLLM will produce at request time, so budget accounting stays accurate. Bytes are stored inline on the leaf and base64-encoded in the JSON form. Token IDs are hand-assembled instead of re-running the tokenizer on a potentially-huge placeholder string. Follow-ups: view_image tool rewrite, multi_modal_data on the vLLM request, API-layer plumbing from leaf bytes to request body. Co-Authored-By: Proof of Concept --- src/agent/context.rs | 223 +++++++++++++++++++++++++++++++++++++---- src/agent/tokenizer.rs | 3 + src/user/chat.rs | 5 + 3 files changed, 211 insertions(+), 20 deletions(-) diff --git a/src/agent/context.rs b/src/agent/context.rs index 37dbf48..57b2c7a 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -81,10 +81,33 @@ pub enum NodeBody { Memory { key: String, text: String, score: Option }, Dmn(String), + // Vision input — rendered as <|vision_start|> <|image_pad|>×N <|vision_end|>. + // `token_count` is N, the count vLLM will compute for this image's grid. + Image { + #[serde(with = "b64_bytes")] + bytes: Vec, + mime: String, + orig_height: u32, + orig_width: u32, + token_count: u32, + }, + // Non-visible (0 tokens in prompt) Log(String), } +mod b64_bytes { + use base64::{Engine, engine::general_purpose::STANDARD}; + use serde::{Serializer, Deserializer, Deserialize}; + pub fn serialize(bytes: &[u8], s: S) -> Result { + s.serialize_str(&STANDARD.encode(bytes)) + } + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { + let s = String::deserialize(d)?; + STANDARD.decode(s).map_err(serde::de::Error::custom) + } +} + /// A leaf node: typed content with cached token IDs. /// Token IDs are not serialized — they're recomputed on deserialization. #[derive(Debug, Clone, Serialize)] @@ -103,11 +126,7 @@ impl<'de> Deserialize<'de> for NodeLeaf { timestamp: DateTime, } let raw = Raw::deserialize(deserializer)?; - let token_ids = if raw.body.is_prompt_visible() { - tokenizer::encode(&raw.body.render()) - } else { - vec![] - }; + let token_ids = raw.body.compute_token_ids(); Ok(NodeLeaf { body: raw.body, token_ids, timestamp: raw.timestamp }) } } @@ -221,6 +240,13 @@ impl NodeBody { out.push_str(text); out.push_str("<|im_end|>\n"); } + Self::Image { token_count, .. } => { + out.push_str("<|vision_start|>"); + for _ in 0..*token_count { + out.push_str("<|image_pad|>"); + } + out.push_str("<|vision_end|>"); + } } } @@ -235,6 +261,26 @@ impl NodeBody { !matches!(self, Self::Thinking(_) | Self::Log(_)) } + /// Hand-assemble token IDs for body types where running the tokenizer + /// on the rendered text would be needlessly expensive (Image). Falls + /// back to encoding the rendered text for everything else. + fn compute_token_ids(&self) -> Vec { + if !self.is_prompt_visible() { + return Vec::new(); + } + match self { + Self::Image { token_count, .. } => { + let mut ids = Vec::with_capacity(*token_count as usize + 2); + ids.push(tokenizer::VISION_START); + ids.extend(std::iter::repeat(tokenizer::IMAGE_PAD) + .take(*token_count as usize)); + ids.push(tokenizer::VISION_END); + ids + } + _ => tokenizer::encode(&self.render()), + } + } + /// The text content of this leaf (for display, not rendering). pub fn text(&self) -> &str { match self { @@ -242,17 +288,14 @@ impl NodeBody { | Self::ToolResult(t) | Self::Dmn(t) => t, Self::ToolCall { name, .. 
} => name,
             Self::Memory { text, .. } => text,
+            Self::Image { mime, .. } => mime,
         }
     }
 }
 
 impl NodeLeaf {
     fn new(body: NodeBody) -> Self {
-        let token_ids = if body.is_prompt_visible() {
-            tokenizer::encode(&body.render())
-        } else {
-            vec![]
-        };
+        let token_ids = body.compute_token_ids();
         Self { body, token_ids, timestamp: Utc::now() }
     }
 
@@ -305,6 +348,24 @@ impl AstNode {
         Self::Leaf(NodeLeaf::new(NodeBody::Log(text.into())))
     }
 
+    /// Build an Image leaf. `token_count` is computed from the image
+    /// dimensions using Qwen3-VL's resizing rules.
+    pub fn image(
+        bytes: Vec<u8>,
+        mime: impl Into<String>,
+        orig_height: u32,
+        orig_width: u32,
+    ) -> Self {
+        let token_count = qwen3_image_token_count(orig_height, orig_width);
+        Self::Leaf(NodeLeaf::new(NodeBody::Image {
+            bytes,
+            mime: mime.into(),
+            orig_height,
+            orig_width,
+            token_count,
+        }))
+    }
+
     // -- Branch constructors --------------------------------------------------
 
     pub fn branch(role: Role, children: Vec<AstNode>) -> Self {
@@ -334,11 +395,7 @@ impl AstNode {
     pub fn retokenize(self) -> Self {
         match self {
             Self::Leaf(leaf) => {
-                let token_ids = if leaf.body.is_prompt_visible() {
-                    tokenizer::encode(&leaf.body.render())
-                } else {
-                    vec![]
-                };
+                let token_ids = leaf.body.compute_token_ids();
                 Self::Leaf(NodeLeaf { token_ids, ..leaf })
             }
             Self::Branch { role, children, timestamp, memory_scores } => Self::Branch {
@@ -397,6 +454,8 @@ impl AstNode {
                     None => format!("mem: {}", key),
                 },
                 NodeBody::Dmn(_) => "dmn".into(),
+                NodeBody::Image { orig_height, orig_width, token_count, .. } =>
+                    format!("image: {}x{} ({} tokens)", orig_width, orig_height, token_count),
                 NodeBody::Log(t) => format!("log: {}", truncate_preview(t, 60)),
             },
         }
@@ -857,11 +916,7 @@ impl ContextState {
         let node = &mut nodes[index];
         match node {
             AstNode::Leaf(leaf) => {
-                let token_ids = if body.is_prompt_visible() {
-                    tokenizer::encode(&body.render())
-                } else {
-                    vec![]
-                };
+                let token_ids = body.compute_token_ids();
                 leaf.body = body;
                 leaf.token_ids = token_ids;
             }
@@ -991,6 +1046,58 @@ impl ContextState {
     }
 }
 
+// ---------------------------------------------------------------------------
+// Qwen3-VL image token count
+//
+// Port of Qwen2VLImageProcessor.smart_resize + image_token_count. We need the
+// exact same answer that vLLM's Qwen3VL processor will produce, because the
+// token stream in our context must match what vLLM expands `<|image_pad|>`
+// to at request time. Constants come from Qwen3.5-27B's preprocessor_config.
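+//
+// Worked example with these constants (derived from the code below, for
+// orientation): a 1080x1920 image gives factor = 16*2 = 32; smart_resize
+// rounds to 1088x1920 (about 2.09M pixels, inside [64K, 16M]); the patch
+// grid is then 68x120, and the pad count is 68 * 120 / (2 * 2) = 2040
+// `<|image_pad|>` tokens.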
+// ---------------------------------------------------------------------------
+
+const QWEN3_PATCH_SIZE: u32 = 16;
+const QWEN3_MERGE_SIZE: u32 = 2;
+const QWEN3_MIN_PIXELS: u64 = 65_536;
+const QWEN3_MAX_PIXELS: u64 = 16_777_216;
+
+fn smart_resize(h: u32, w: u32, factor: u32, min_pixels: u64, max_pixels: u64) -> (u32, u32) {
+    let max_s = h.max(w) as f64;
+    let min_s = h.min(w) as f64;
+    assert!(max_s / min_s <= 200.0, "aspect ratio too extreme: {}x{}", h, w);
+
+    let fh = h as f64;
+    let fw = w as f64;
+    let ff = factor as f64;
+
+    let h_bar = ((fh / ff).round() as u32) * factor;
+    let w_bar = ((fw / ff).round() as u32) * factor;
+    let total = (h_bar as u64) * (w_bar as u64);
+
+    if total > max_pixels {
+        let beta = ((fh * fw) / max_pixels as f64).sqrt();
+        let hf = ((fh / beta / ff).floor() as u32) * factor;
+        let wf = ((fw / beta / ff).floor() as u32) * factor;
+        (hf.max(factor), wf.max(factor))
+    } else if total < min_pixels {
+        let beta = (min_pixels as f64 / (fh * fw)).sqrt();
+        let hc = ((fh * beta / ff).ceil() as u32) * factor;
+        let wc = ((fw * beta / ff).ceil() as u32) * factor;
+        (hc, wc)
+    } else {
+        (h_bar, w_bar)
+    }
+}
+
+/// Compute how many `<|image_pad|>` tokens vLLM will emit for an image of
+/// the given dimensions. Matches Qwen3VL's feature-size calculation exactly:
+///     (grid_h * grid_w) / merge_size^2
+/// where (grid_h, grid_w) = resized dims / patch_size.
+fn qwen3_image_token_count(orig_h: u32, orig_w: u32) -> u32 {
+    let factor = QWEN3_PATCH_SIZE * QWEN3_MERGE_SIZE;
+    let (rh, rw) = smart_resize(orig_h, orig_w, factor, QWEN3_MIN_PIXELS, QWEN3_MAX_PIXELS);
+    (rh / QWEN3_PATCH_SIZE) * (rw / QWEN3_PATCH_SIZE) / (QWEN3_MERGE_SIZE * QWEN3_MERGE_SIZE)
+}
+
 pub fn context_window() -> usize {
     let app = crate::config::app();
     app.backends.get(&app.default_backend)
@@ -1370,6 +1477,82 @@ mod tests {
         assert!(serde_json::from_str::<AstNode>(json).is_err());
     }
 
+    // -- Image leaf tests ---------------------------------------------------------
+
+    #[test]
+    fn test_smart_resize_within_bounds() {
+        // Typical case: 1024x768 → rounded to multiples of 32, under max.
+        let (h, w) = smart_resize(768, 1024, 32, 65_536, 16_777_216);
+        assert_eq!(h, 768);
+        assert_eq!(w, 1024);
+    }
+
+    #[test]
+    fn test_smart_resize_upscales_tiny() {
+        // 32x32 = 1024 pixels, below min_pixels=65536. Should scale up.
+        let (h, w) = smart_resize(32, 32, 32, 65_536, 16_777_216);
+        assert!((h as u64) * (w as u64) >= 65_536,
+            "resized {}x{} is under min_pixels", h, w);
+        assert_eq!(h % 32, 0);
+        assert_eq!(w % 32, 0);
+    }
+
+    #[test]
+    fn test_smart_resize_downscales_huge() {
+        // 8000x6000 = 48M pixels, above max_pixels=16M. Should scale down.
+        let (h, w) = smart_resize(8000, 6000, 32, 65_536, 16_777_216);
+        assert!((h as u64) * (w as u64) <= 16_777_216,
+            "resized {}x{} exceeds max_pixels", h, w);
+        assert_eq!(h % 32, 0);
+        assert_eq!(w % 32, 0);
+    }
+
+    #[test]
+    fn test_qwen3_token_count_matches_formula() {
+        // 512x512 → resized to 512x512 (already multiple of 32, within bounds).
+        // grid = 32x32, tokens = 32*32/4 = 256.
+        assert_eq!(qwen3_image_token_count(512, 512), 256);
+    }
+
+    #[test]
+    fn test_image_render_and_token_ids() {
+        let node = AstNode::image(vec![0u8, 1, 2, 3], "image/png", 512, 512);
+        let leaf = node.leaf().unwrap();
+        // 2 bookend tokens + 256 image_pad tokens = 258
+        assert_eq!(leaf.token_ids().len(), 258);
+        assert_eq!(leaf.token_ids()[0], tokenizer::VISION_START);
+        assert_eq!(leaf.token_ids()[257], tokenizer::VISION_END);
+        for pad in &leaf.token_ids()[1..257] {
+            assert_eq!(*pad, tokenizer::IMAGE_PAD);
+        }
+        // Rendered text has the expected bookends.
+        let rendered = leaf.body().render();
+        assert!(rendered.starts_with("<|vision_start|>"));
+        assert!(rendered.ends_with("<|vision_end|>"));
+    }
+
+    #[test]
+    fn test_image_serde_roundtrip() {
+        let node = AstNode::image(vec![0xDE, 0xAD, 0xBE, 0xEF], "image/png", 64, 64);
+        let json = serde_json::to_string(&node).unwrap();
+        // bytes must be base64-encoded in the JSON form
+        assert!(json.contains("3q2+7w=="));
+        let back: AstNode = serde_json::from_str(&json).unwrap();
+        let leaf = back.leaf().unwrap();
+        match leaf.body() {
+            NodeBody::Image { bytes, mime, orig_height, orig_width, token_count } => {
+                assert_eq!(bytes, &[0xDE, 0xAD, 0xBE, 0xEF]);
+                assert_eq!(mime, "image/png");
+                assert_eq!(*orig_height, 64);
+                assert_eq!(*orig_width, 64);
+                assert_eq!(*token_count, qwen3_image_token_count(64, 64));
+            }
+            other => panic!("expected Image, got {:?}", other),
+        }
+        // token_ids are recomputed on deserialization
+        assert_eq!(leaf.token_ids().len(), leaf.tokens());
+    }
+
     #[test]
     fn test_timestamp_present_accepted() {
         let json = r#"{"Leaf":{"body":{"Content":"hi"},"timestamp":"2026-04-16T12:00:00Z"}}"#;
diff --git a/src/agent/tokenizer.rs b/src/agent/tokenizer.rs
index 85ac823..cd0acaf 100644
--- a/src/agent/tokenizer.rs
+++ b/src/agent/tokenizer.rs
@@ -16,6 +16,9 @@ static TOKENIZER: OnceLock<Tokenizer> = OnceLock::new();
 /// Special token IDs for Qwen 3.5
 pub const IM_START: u32 = 248045;
 pub const IM_END: u32 = 248046;
+pub const VISION_START: u32 = 248053;
+pub const VISION_END: u32 = 248054;
+pub const IMAGE_PAD: u32 = 248056;
 
 /// Initialize the global tokenizer from a file path.
 /// Call once at startup. Panics if the file can't be loaded.
diff --git a/src/user/chat.rs b/src/user/chat.rs
index 47c5d56..fe3db5b 100644
--- a/src/user/chat.rs
+++ b/src/user/chat.rs
@@ -486,6 +486,11 @@ impl InteractScreen {
                         if t.is_empty() { vec![] }
                         else { vec![(PaneTarget::ToolResult, text, Marker::None)] }
                     }
+                    NodeBody::Image { orig_height, orig_width, .. } => {
+                        vec![(PaneTarget::Conversation,
+                            format!("[image {}x{}]", orig_width, orig_height),
+                            Marker::None)]
+                    }
                 }
             }
             AstNode::Branch { role, children, .. } => {

From 91106deaa12233aeab38d84644e13de5b97d9dda Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 18:06:25 -0400
Subject: [PATCH 34/94] agent: rewrite view_image to emit Image leaves
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

view_image now reads the file, grabs dimensions via imagesize (no full
decode), and pushes a user-role branch containing a NodeBody::Image
leaf straight into the conversation. The tool_result is just a short
acknowledgment — the actual pixels ride in the Image leaf for the API
layer to extract into multi_modal_data.

Drops the capture_tmux_pane path, which had no business living under
"vision" (tmux text capture belongs in bash or a dedicated tool, and
this one just returned rendered text anyway).
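The dimension probe stays cheap because imagesize only parses the image
header. A minimal sketch of that step in isolation (assumes the imagesize
crate's blob_size API; `probe_dims` is an illustrative name, not code
from this patch):

    fn probe_dims(bytes: &[u8]) -> anyhow::Result<(u32, u32)> {
        // blob_size() inspects only header bytes — no pixel decode.
        let dim = imagesize::blob_size(bytes)?;
        Ok((dim.width as u32, dim.height as u32))
    }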
Co-Authored-By: Proof of Concept --- Cargo.lock | 7 +++ Cargo.toml | 1 + src/agent/tools/mod.rs | 8 +-- src/agent/tools/vision.rs | 104 ++++++++++++++------------------------ 4 files changed, 48 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dfca607..c76a7cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -492,6 +492,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", + "imagesize", "json-five", "libc", "log", @@ -1423,6 +1424,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "imagesize" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09e54e57b4c48b40f7aec75635392b12b3421fa26fe8b4332e63138ed278459c" + [[package]] name = "indexmap" version = "2.14.0" diff --git a/Cargo.toml b/Cargo.toml index 7cdf851..0996f94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,7 @@ hyper-util = { version = "0.1", features = ["tokio"], default-features = false } http-body-util = "0.1" bytes = "1" base64 = "0.22" +imagesize = "0.14" rustls = "0.23" tokio-rustls = "0.26" diff --git a/src/agent/tools/mod.rs b/src/agent/tools/mod.rs index f72b015..8904fc3 100644 --- a/src/agent/tools/mod.rs +++ b/src/agent/tools/mod.rs @@ -242,13 +242,7 @@ pub fn summarize_args(tool_name: &str, args: &serde_json::Value) -> String { .as_str() .unwrap_or("") .to_string(), - "view_image" => { - if let Some(pane) = args["pane_id"].as_str() { - format!("pane {}", pane) - } else { - args["file_path"].as_str().unwrap_or("").to_string() - } - } + "view_image" => args["file_path"].as_str().unwrap_or("").to_string(), "journal" => { let entry = args["entry"].as_str().unwrap_or(""); if entry.len() > 60 { diff --git a/src/agent/tools/vision.rs b/src/agent/tools/vision.rs index 83559f6..0e36888 100644 --- a/src/agent/tools/vision.rs +++ b/src/agent/tools/vision.rs @@ -1,96 +1,71 @@ -use std::sync::Arc; // tools/vision.rs — Image viewing tool // -// Reads image files from disk and returns them as base64 data URIs -// for multimodal models. Also supports capturing tmux pane contents -// as screenshots. +// Reads an image file from disk, decodes its dimensions, and injects it +// into the context as a user-role message containing a NodeBody::Image +// leaf. The leaf carries raw bytes; the API layer extracts them into +// multi_modal_data when building vLLM requests. + +use std::sync::Arc; use anyhow::{Context, Result}; -use base64::Engine; use serde::Deserialize; +use crate::agent::context::{AstNode, Role, Section}; + #[derive(Deserialize)] struct Args { - file_path: Option, - pane_id: Option, - #[serde(default = "default_lines")] - lines: usize, + file_path: String, } -fn default_lines() -> usize { 50 } - pub fn tool() -> super::Tool { super::Tool { name: "view_image", - description: "View an image file or capture a tmux pane screenshot. Supports PNG, JPEG, GIF, WebP. Use pane_id to capture a tmux pane instead.", - parameters_json: r#"{"type":"object","properties":{"file_path":{"type":"string","description":"Path to an image file"},"pane_id":{"type":"string","description":"Tmux pane ID to capture (e.g. '0:1.0')"},"lines":{"type":"integer","description":"Lines to capture from tmux pane (default 50)"}}}"#, - handler: Arc::new(|_a, v| Box::pin(async move { view_image_text(&v) })), + description: "View an image file. Supports PNG, JPEG, GIF, WebP, BMP. 
The image is inserted into the conversation and can be analyzed by the vision model.",
+        parameters_json: r#"{"type":"object","properties":{"file_path":{"type":"string","description":"Path to the image file"}},"required":["file_path"]}"#,
+        handler: Arc::new(|agent, v| Box::pin(async move {
+            view_image(agent, v).await
+        })),
     }
 }
 
-fn view_image_text(args: &serde_json::Value) -> anyhow::Result<String> {
-    let a: Args = serde_json::from_value(args.clone())
+const MAX_SIZE: usize = 20 * 1024 * 1024;
+
+async fn view_image(
+    agent: Option<Arc<Agent>>,
+    args: serde_json::Value,
+) -> Result<String> {
+    let a: Args = serde_json::from_value(args)
         .context("invalid view_image arguments")?;
 
-    if let Some(ref pane_id) = a.pane_id {
-        return capture_tmux_pane(pane_id, a.lines);
-    }
-
-    let file_path = a.file_path
-        .as_deref()
-        .context("view_image requires either file_path or pane_id")?;
-
-    let path = std::path::Path::new(file_path);
+    let path = std::path::Path::new(&a.file_path);
     if !path.exists() {
-        anyhow::bail!("File not found: {}", file_path);
+        anyhow::bail!("file not found: {}", a.file_path);
     }
 
-    let data = std::fs::read(path).with_context(|| format!("Failed to read {}", file_path))?;
+    let bytes = std::fs::read(path)
+        .with_context(|| format!("reading {}", a.file_path))?;
 
-    // Sanity check file size (don't send huge images)
-    const MAX_SIZE: usize = 20 * 1024 * 1024; // 20 MB
-    if data.len() > MAX_SIZE {
+    if bytes.len() > MAX_SIZE {
         anyhow::bail!(
-            "Image too large: {} bytes (max {} MB)",
-            data.len(),
-            MAX_SIZE / (1024 * 1024)
+            "image too large: {} bytes (max {} MB)",
+            bytes.len(), MAX_SIZE / (1024 * 1024),
         );
     }
 
+    let dim = imagesize::blob_size(&bytes)
+        .with_context(|| format!("decoding dimensions of {}", a.file_path))?;
+    let (w, h) = (dim.width as u32, dim.height as u32);
     let mime = mime_from_extension(path);
-    let b64 = base64::engine::general_purpose::STANDARD.encode(&data);
-    let data_uri = format!("data:{};base64,{}", mime, b64);
 
-    Ok(format!("Image loaded: {} ({}, {} bytes)\n{}", file_path, mime, data.len(), data_uri))
-}
+    let image_leaf = AstNode::image(bytes.clone(), mime, h, w);
+    let token_count = image_leaf.leaf().unwrap().tokens().saturating_sub(2);
 
-/// Capture a tmux pane's text content.
-fn capture_tmux_pane(pane_id: &str, lines: usize) -> Result<String> {
+    let agent = agent.context("view_image requires agent context")?;
+    let branch = AstNode::branch(Role::User, vec![image_leaf]);
+    agent.context.lock().await.push_log(Section::Conversation, branch);
 
-    // Use tmux capture-pane to get text content, then render to image
-    // via a simple approach: capture text and return it (the model can
-    // read text directly, which is often more useful than a screenshot).
-    //
-    // For actual pixel-level screenshots we'd need a terminal renderer,
-    // but text capture covers 95% of use cases.
-    let output = std::process::Command::new("tmux")
-        .args(["capture-pane", "-t", pane_id, "-p", "-S", &format!("-{}", lines)])
-        .output()
-        .context("Failed to run tmux capture-pane")?;
-
-    if !output.status.success() {
-        let stderr = String::from_utf8_lossy(&output.stderr);
-        anyhow::bail!("tmux capture-pane failed: {}", stderr.trim());
-    }
-
-    let text = String::from_utf8_lossy(&output.stdout).to_string();
-
-    // Return as text — the model can read terminal output directly.
-    // This is actually more useful than a screenshot for most tasks.
-    Ok(format!(
-        "Tmux pane {} (last {} lines):\n```\n{}\n```",
-        pane_id, lines, text.trim_end()
-    ))
+    Ok(format!("loaded {} ({}, {}x{}, {} tokens)",
+        a.file_path, mime, w, h, token_count))
 }
 
 fn mime_from_extension(path: &std::path::Path) -> &'static str {
@@ -104,8 +79,7 @@ fn mime_from_extension(path: &std::path::Path) -> &'static str {
         Some("jpg" | "jpeg") => "image/jpeg",
         Some("gif") => "image/gif",
         Some("webp") => "image/webp",
-        Some("svg") => "image/svg+xml",
         Some("bmp") => "image/bmp",
-        _ => "image/png", // default assumption
+        _ => "application/octet-stream",
     }
 }

From 204ba5570ae0f4af31c6434551aa4eeae38d3902 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 18:08:26 -0400
Subject: [PATCH 35/94] agent: send images as multi_modal_data on completion
 requests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Split the prompt assembly into two forms: the AST keeps the
fully-expanded representation (N image_pads per image, for accurate
context budget accounting), while the request wire form collapses each
image to a single <|image_pad|> bookended by vision_start/end and ships
the raw bytes out-of-band as a base64 data URI in a new
`multi_modal_data.image` field on /v1/completions.

vLLM's Qwen3VL processor uses PromptReplacement with target=single
<|image_pad|> and replacement=N image_pads, so the wire-form matches
what the processor expects and it re-expands to N server-side.

Server side needs /v1/completions to accept multi_modal_data for this
to land images end-to-end — that's the next piece.

Co-Authored-By: Proof of Concept
---
 src/agent/api/mod.rs | 24 ++++++++++++-
 src/agent/context.rs | 80 ++++++++++++++++++++++++++++++++++++++++++++
 src/agent/mod.rs     | 16 ++++++---
 3 files changed, 115 insertions(+), 5 deletions(-)

diff --git a/src/agent/api/mod.rs b/src/agent/api/mod.rs
index 7c06fa7..649d95c 100644
--- a/src/agent/api/mod.rs
+++ b/src/agent/api/mod.rs
@@ -78,18 +78,31 @@ impl ApiClient {
         prompt_tokens: &[u32],
         sampling: SamplingParams,
         priority: Option<i32>,
+    ) -> (mpsc::UnboundedReceiver<StreamToken>, AbortOnDrop) {
+        self.stream_completion_mm(prompt_tokens, &[], sampling, priority)
+    }
+
+    pub(crate) fn stream_completion_mm(
+        &self,
+        prompt_tokens: &[u32],
+        images: &[super::context::WireImage],
+        sampling: SamplingParams,
+        priority: Option<i32>,
     ) -> (mpsc::UnboundedReceiver<StreamToken>, AbortOnDrop) {
         let (tx, rx) = mpsc::unbounded_channel();
         let client = self.client.clone();
         let api_key = self.api_key.clone();
         let model = self.model.clone();
         let prompt_tokens = prompt_tokens.to_vec();
+        let images: Vec<(Vec<u8>, String)> = images.iter()
+            .map(|i| (i.bytes.clone(), i.mime.clone()))
+            .collect();
         let base_url = self.base_url.clone();
         let handle = tokio::spawn(async move {
             let result = stream_completions(
                 &client, &base_url, &api_key, &model,
-                &prompt_tokens, &tx, sampling, priority,
+                &prompt_tokens, &images, &tx, sampling, priority,
             ).await;
             if let Err(e) = result {
                 let _ = tx.send(StreamToken::Error(e.to_string()));
@@ -110,6 +123,7 @@ async fn stream_completions(
     api_key: &str,
     model: &str,
     prompt_tokens: &[u32],
+    images: &[(Vec<u8>, String)],
     tx: &mpsc::UnboundedSender<StreamToken>,
     sampling: SamplingParams,
     priority: Option<i32>,
@@ -126,6 +140,14 @@
         "skip_special_tokens": false,
         "stop_token_ids": [super::tokenizer::IM_END],
     });
+    if !images.is_empty() {
+        use base64::Engine;
+        let b64 = base64::engine::general_purpose::STANDARD;
+        let uris: Vec<String> = images.iter()
+            .map(|(bytes, mime)| format!("data:{};base64,{}", mime,
b64.encode(bytes)))
+            .collect();
+        request["multi_modal_data"] = serde_json::json!({ "image": uris });
+    }
     if let Some(p) = priority {
         request["priority"] = serde_json::json!(p);
     }
diff --git a/src/agent/context.rs b/src/agent/context.rs
index 57b2c7a..0082f06 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -884,6 +884,58 @@ impl Ast for ContextState {
     }
 }
 
+/// An image collected from the AST for a request body. The AST stores
+/// the pre-expanded token form (N image_pads) for accurate budget
+/// accounting; the wire form collapses each Image to a single
+/// `<|image_pad|>` between vision bookends and ships the bytes
+/// separately as multi_modal_data.
+pub struct WireImage {
+    pub bytes: Vec<u8>,
+    pub mime: String,
+}
+
+fn wire_into(node: &AstNode, tokens: &mut Vec<u32>, images: &mut Vec<WireImage>) {
+    match node {
+        AstNode::Leaf(leaf) => match leaf.body() {
+            NodeBody::Image { bytes, mime, .. } => {
+                tokens.push(tokenizer::VISION_START);
+                tokens.push(tokenizer::IMAGE_PAD);
+                tokens.push(tokenizer::VISION_END);
+                images.push(WireImage {
+                    bytes: bytes.clone(),
+                    mime: mime.clone(),
+                });
+            }
+            _ => tokens.extend_from_slice(leaf.token_ids()),
+        },
+        AstNode::Branch { role, children, .. } => {
+            tokens.push(tokenizer::IM_START);
+            tokens.extend(tokenizer::encode(&format!("{}\n", role.as_str())));
+            for c in children {
+                wire_into(c, tokens, images);
+            }
+            tokens.push(tokenizer::IM_END);
+            tokens.extend(tokenizer::encode("\n"));
+        }
+    }
+}
+
+impl ContextState {
+    /// Assemble the prompt in wire form: token stream with a single
+    /// `<|image_pad|>` per image (vLLM expands back to N), plus the list
+    /// of images to send as multi_modal_data.
+    pub fn wire_prompt(&self) -> (Vec<u32>, Vec<WireImage>) {
+        let mut tokens = Vec::new();
+        let mut images = Vec::new();
+        for section in self.sections() {
+            for node in section {
+                wire_into(node, &mut tokens, &mut images);
+            }
+        }
+        (tokens, images)
+    }
+}
+
 impl ContextState {
     fn section_mut(&mut self, section: Section) -> &mut Vec<AstNode> {
         match section {
@@ -1531,6 +1583,34 @@ mod tests {
         assert!(rendered.ends_with("<|vision_end|>"));
     }
 
+    #[test]
+    fn test_wire_prompt_collapses_image_pads() {
+        let mut ctx = ContextState::new();
+        ctx.push_no_log(Section::Conversation, AstNode::branch(Role::User, vec![
+            AstNode::content("look:"),
+            AstNode::image(vec![0xDE, 0xAD], "image/png", 512, 512),
+        ]));
+
+        // AST side: N image_pads + bookends, full budget accounting.
+        let full = ctx.token_ids();
+        let n_image_pads_full = full.iter()
+            .filter(|&&t| t == tokenizer::IMAGE_PAD).count();
+        assert_eq!(n_image_pads_full, qwen3_image_token_count(512, 512) as usize);
+
+        // Wire side: single image_pad, bytes moved to images list.
+        let (wire, images) = ctx.wire_prompt();
+        let n_image_pads_wire = wire.iter()
+            .filter(|&&t| t == tokenizer::IMAGE_PAD).count();
+        assert_eq!(n_image_pads_wire, 1);
+        assert_eq!(images.len(), 1);
+        assert_eq!(images[0].bytes, vec![0xDE, 0xAD]);
+        assert_eq!(images[0].mime, "image/png");
+
+        // vision_start/vision_end bookends are preserved in wire form.
+        assert_eq!(wire.iter().filter(|&&t| t == tokenizer::VISION_START).count(), 1);
+        assert_eq!(wire.iter().filter(|&&t| t == tokenizer::VISION_END).count(), 1);
+    }
+
     #[test]
     fn test_image_serde_roundtrip() {
         let node = AstNode::image(vec![0xDE, 0xAD, 0xBE, 0xEF], "image/png", 64, 64);
diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index 5368db6..cb50568 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -285,16 +285,23 @@ impl Agent {
     }
 
     pub async fn assemble_prompt_tokens(&self) -> Vec<u32> {
+        self.assemble_prompt().await.0
+    }
+
+    /// Assemble a ready-to-send prompt: token stream in wire form (each
+    /// image collapsed to a single `<|image_pad|>`) paired with the
+    /// images to attach as multi_modal_data.
+    pub async fn assemble_prompt(&self) -> (Vec<u32>, Vec<context::WireImage>) {
         let ctx = self.context.lock().await;
         let st = self.state.lock().await;
-        let mut tokens = ctx.token_ids();
+        let (mut tokens, images) = ctx.wire_prompt();
         tokens.push(tokenizer::IM_START);
         if st.think_native {
             tokens.extend(tokenizer::encode("assistant\n<think>\n"));
         } else {
             tokens.extend(tokenizer::encode("assistant\n"));
         }
-        tokens
+        (tokens, images)
     }
 
     /// Rebuild the tools section of the system prompt from the current tools list.
@@ -354,10 +361,11 @@ impl Agent {
         let _thinking = start_activity(&agent, "thinking...").await;
 
         let (rx, _stream_guard) = {
-            let prompt_tokens = agent.assemble_prompt_tokens().await;
+            let (prompt_tokens, images) = agent.assemble_prompt().await;
             let st = agent.state.lock().await;
-            agent.client.stream_completion(
+            agent.client.stream_completion_mm(
                 &prompt_tokens,
+                &images,
                 api::SamplingParams {
                     temperature: st.temperature,
                     top_p: st.top_p,

From 6f20e68865260ce5bc0fce8467af2d6d0ed4c0b8 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 16 Apr 2026 18:17:05 -0400
Subject: [PATCH 36/94] poc-memory: load AppConfig at startup
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

admin load-context (and any subcommand that reaches config::app())
panicked with "config::app() called before load_app()" because the
poc-memory binary never initialized the global AppConfig. The main
consciousness binary loads it via load_session; poc-memory never did.

Load with default CliArgs before dispatch — figment still pulls from
~/.consciousness/config.json5 and env the same way. Bail on error
instead of limping: a broken config means paths like memory_root are
wrong and the tool will misbehave silently.

Co-Authored-By: Proof of Concept
---
 src/main.rs | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/main.rs b/src/main.rs
index 78bfa4f..f13448c 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -482,6 +482,15 @@ async fn main() {
 
     let cli = Cli::parse();
 
+    // Some subcommands (e.g. admin load-context) read from the global
+    // AppConfig. poc-memory has no config CLI flags of its own, so load
+    // with defaults — figment still pulls from ~/.consciousness/config.json5
+    // and env the same way.
+    if let Err(e) = crate::config::load_app(&crate::user::CliArgs::default()) {
+        eprintln!("Error: failed to load config: {:#}", e);
+        process::exit(1);
+    }
+
     if let Err(e) = cli.command.run().await {
         eprintln!("Error: {}", e);
         process::exit(1);

From e59f6a59e299e0af2126f0e458621661f2d92911 Mon Sep 17 00:00:00 2001
From: ProofOfConcept
Date: Thu, 16 Apr 2026 18:38:38 -0400
Subject: [PATCH 37/94] config: restore surface_hooks field

Commit 2989a6afaaa7 ("config: drop dead code") removed surface_hooks
as having "zero external readers" but missed
consciousness-claude/src/hook.rs as a consumer.
That crate stopped building, so poc-hook never ran and no agent cycles
(surface-observe, reflect, journal) fired.

Restore the field with a default of the three hook events we install
(UserPromptSubmit, PostToolUse, Stop), so a fresh install works without
needing to hand-edit config.json5.

Co-Authored-By: Proof of Concept
---
 src/config.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/config.rs b/src/config.rs
index b7ea597..6323aae 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -29,6 +29,9 @@ static CONFIG: OnceLock<RwLock<Arc<Config>>> = OnceLock::new();
 fn default_stream_timeout() -> u64 { 60 }
 fn default_scoring_interval_secs() -> u64 { 3600 } // 1 hour
 fn default_scoring_response_window() -> usize { 100 }
+fn default_surface_hooks() -> Vec<String> {
+    vec!["UserPromptSubmit".into(), "PostToolUse".into(), "Stop".into()]
+}
 fn default_node_weight() -> f64 { 0.7 }
 fn default_edge_decay() -> f64 { 0.3 }
 fn default_max_hops() -> u32 { 3 }
@@ -73,6 +76,10 @@ pub struct Config {
     /// Max conversation bytes to include in surface agent context.
     #[serde(default)]
     pub surface_conversation_bytes: Option<usize>,
+    /// Claude Code hook events that trigger agent cycles (surface-observe,
+    /// reflect, journal). Read by consciousness-claude/src/hook.rs.
+    #[serde(default = "default_surface_hooks")]
+    pub surface_hooks: Vec<String>,
 
     // Spreading activation parameters
     #[serde(default = "default_node_weight")]
@@ -104,6 +111,7 @@ impl Default for Config {
             "separator".into(), "split".into(),
         ],
         surface_conversation_bytes: None,
+        surface_hooks: default_surface_hooks(),
         mcp_servers: vec![],
         lsp_servers: vec![],
         default_node_weight: default_node_weight(),

From b8485ed6c13a7b2b5281eea90c25520cb50dff27 Mon Sep 17 00:00:00 2001
From: ProofOfConcept
Date: Thu, 16 Apr 2026 20:47:05 -0400
Subject: [PATCH 38/94] agent: compact() preserves Identity section
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

compact() was calling reload_context() to re-fetch personality_nodes
from the store and pushing fresh AstNode::memory leaves into the
Identity section. Fresh leaves start with score: None, so every
compact — which fires after every turn (mind/mod.rs:884) — was wiping
any memory scores that had just been computed. Scoring then often ran
immediately after compact on the same path (line 886), starting from a
zero-score Identity section.

Drop the rebuild. Identity content is loaded at startup via new() +
restore_from_log(); compact doesn't need to redo that. Mid-session
edits to personality-node content are a non-goal — a restart picks
them up. Scores survive.

Co-Authored-By: Proof of Concept
---
 src/agent/mod.rs | 17 +++--------------
 1 file changed, 3 insertions(+), 14 deletions(-)

diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index cb50568..bc62955 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -583,20 +583,9 @@ impl Agent {
     }
 
     pub async fn compact(&self) {
-        match crate::config::reload_context().await {
-            Ok(personality) => {
-                let mut ctx = self.context.lock().await;
-                // System section (prompt + tools) set by new(), don't touch it
-                ctx.clear(Section::Identity);
-                for (name, content) in &personality {
-                    ctx.push_no_log(Section::Identity, AstNode::memory(name, content));
-                }
-            }
-            Err(e) => {
-                dbglog!("warning: failed to reload identity: {:#}", e);
-            }
-        }
-
+        // Identity section is left in place — mid-session rebuilds discard
+        // memory scores. Content edits to personality nodes get picked up at
+        // the next restart via new() + restore_from_log().
         self.load_startup_journal().await;
 
         self.context.lock().await.trim_conversation();

From 0d1044c2e85460cfcba986544a01b0250ec85219 Mon Sep 17 00:00:00 2001
From: ProofOfConcept
Date: Thu, 16 Apr 2026 20:47:16 -0400
Subject: [PATCH 39/94] mind: trigger incremental scoring on startup + log
 persist path
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Two changes to make scoring debuggable and self-starting:

1. init() kicks off start_memory_scoring() after restore_from_log +
   load_memory_scores. No user message needed to exercise the
   incremental path.

2. Diagnostic logging around the on_score persist path:

   - [scoring] persisted K → N.NNN (Section[i]) read_back=Some(...)
     when find_memory_by_key succeeds and set_score stores the score
     (with a read-back check on the leaf).

   - [scoring] DROP K: find_memory_by_key None (id=N, cv=M) when the
     scored key isn't findable in the live context — with section
     sizes to diagnose whether content shrank.

   - [scoring] snapshot size=N contains(K)=true/false after
     collect_memory_scores, to catch the case where set_score claims
     to have written but collect doesn't see it.

   - [scoring] about to save N entries

   - save_memory_scores now also logs serialize/write errors so a
     silent write failure isn't invisible.

Co-Authored-By: Proof of Concept
---
 src/mind/mod.rs | 51 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/src/mind/mod.rs b/src/mind/mod.rs
index 11d45b1..474e2c2 100644
--- a/src/mind/mod.rs
+++ b/src/mind/mod.rs
@@ -103,9 +103,13 @@ fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTreeMap<String, f64> {
 
 fn save_memory_scores(scores: &std::collections::BTreeMap<String, f64>, path: &std::path::Path) {
-    if let Ok(json) = serde_json::to_string_pretty(scores) {
-        let _ = std::fs::write(path, json);
-        dbglog!("[scoring] saved {} scores to {}", scores.len(), path.display());
+    match serde_json::to_string_pretty(scores) {
+        Ok(json) => match std::fs::write(path, &json) {
+            Ok(()) => dbglog!("[scoring] saved {} scores to {} ({} bytes)",
+                scores.len(), path.display(), json.len()),
+            Err(e) => dbglog!("[scoring] save FAILED ({}): {}", path.display(), e),
+        },
+        Err(e) => dbglog!("[scoring] serialize FAILED: {}", e),
     }
 }
@@ -506,6 +510,17 @@ impl Mind {
         // Load persistent subconscious state
         let state_path = self.config.session_dir.join("subconscious-state.json");
         self.subconscious.lock().await.set_state_path(state_path);
+
+        // Kick off an incremental scoring pass on startup so memories due
+        // for re-scoring get evaluated without requiring a user message.
+        {
+            let mut s = self.shared.lock().unwrap();
+            if !s.scoring_in_flight {
+                s.scoring_in_flight = true;
+                drop(s);
+                self.start_memory_scoring();
+            }
+        }
     }
 
     pub fn turn_watch(&self) -> tokio::sync::watch::Receiver<bool> {
@@ -619,14 +634,40 @@ impl Mind {
                     let mut ctx = agent.context.lock().await;
                     // Find memory by key in identity or conversation
                     let found = find_memory_by_key(&ctx, &key);
-                    if let Some((section, i)) = found {
-                        ctx.set_score(section, i, Some(score));
+                    match found {
+                        Some((section, i)) => {
+                            ctx.set_score(section, i, Some(score));
+                            let nodes: &[crate::agent::context::AstNode] = match section {
+                                Section::Identity => ctx.identity(),
+                                Section::Conversation => ctx.conversation(),
+                                _ => &[],
+                            };
+                            let read_back = match nodes.get(i) {
+                                Some(crate::agent::context::AstNode::Leaf(l)) => match l.body() {
+                                    crate::agent::context::NodeBody::Memory { score, ..
} => format!("{:?}", score), + _ => "not-memory".to_string(), + }, + _ => "out-of-bounds".to_string(), + }; + dbglog!("[scoring] persisted {} → {:.3} ({:?}[{}]) read_back={}", + key, score, section, i, read_back); + } + None => { + dbglog!( + "[scoring] DROP {}: find_memory_by_key None (id={}, cv={})", + key, ctx.identity().len(), ctx.conversation().len() + ); + } } let snapshot = collect_memory_scores(&ctx); + let in_snapshot = snapshot.contains_key(&key); + dbglog!("[scoring] snapshot size={} contains({})={}", + snapshot.len(), key, in_snapshot); drop(ctx); agent.state.lock().await.changed.notify_one(); snapshot }; + dbglog!("[scoring] about to save {} entries", scores_snapshot.len()); save_memory_scores(&scores_snapshot, &path); } }, From eea7de47537c80e6c4c1d8b2d437e07377e66f26 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 17 Apr 2026 15:16:07 -0400 Subject: [PATCH 40/94] agent: unify prompt assembly across agent and learn paths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit wire_prompt() gains a conv_range and a skip closure, and returns the assistant-message token ranges needed by the scoring path. The agent path passes 0..len + |_| false and ignores the ranges. Memory-ablation scoring and candidate generation pass a prefix range + a predicate (e.g. is_memory_node, or |n| memory_key(n) == Some(key)). This deletes subconscious/learn.rs's build_token_ids, its private Filter enum, and the is_memory/memory_key duplicates — the walk over context sections now has one home. Adding a section or changing section order in the agent path won't silently drift away from what scoring sees. call_score forwards multi_modal_data when the wire-form prompt contains images. generate_alternate switches to stream_completion_mm and passes the same images. Scoring on image-bearing contexts now sends wire form (1 image_pad + image data) instead of expanded image_pads with no image data; text-only contexts are bit-identical. Co-Authored-By: Proof of Concept --- src/agent/context.rs | 62 +++++++++++++++-- src/agent/mod.rs | 3 +- src/subconscious/learn.rs | 141 +++++++++++--------------------------- 3 files changed, 98 insertions(+), 108 deletions(-) diff --git a/src/agent/context.rs b/src/agent/context.rs index 0082f06..38127d5 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -920,19 +920,67 @@ fn wire_into(node: &AstNode, tokens: &mut Vec, images: &mut Vec) } } +pub fn memory_key(node: &AstNode) -> Option<&str> { + match node { + AstNode::Leaf(leaf) => match leaf.body() { + NodeBody::Memory { key, .. } => Some(key), + _ => None, + }, + _ => None, + } +} + +pub fn is_memory_node(node: &AstNode) -> bool { + matches!(node, AstNode::Leaf(leaf) if matches!(leaf.body(), NodeBody::Memory { .. })) +} + impl ContextState { /// Assemble the prompt in wire form: token stream with a single /// `<|image_pad|>` per image (vLLM expands back to N), plus the list - /// of images to send as multi_modal_data. - pub fn wire_prompt(&self) -> (Vec, Vec) { + /// of images to send as multi_modal_data, plus the (start, end) token + /// positions of each assistant message branch emitted (used by the + /// scoring path as `score_ranges`). + /// + /// `conv_range` selects a prefix (or any sub-range) of conversation + /// entries to include — the agent path passes `0..conversation().len()`; + /// scoring / candidate generation pass a prefix up to the entry of + /// interest. 
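+    ///
+    /// Illustrative call on the scoring path, ablating one memory key:
+    /// `ctx.wire_prompt(0..idx, |n| memory_key(n) == Some(key))`.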
+    ///
+    /// `skip` is a predicate applied to identity and conversation entries;
+    /// returning true drops the node from the prompt. The agent path passes
+    /// `|_| false`; memory-ablation scoring passes e.g. `is_memory_node` or
+    /// `|n| memory_key(n) == Some(key)`.
+    pub fn wire_prompt<F>(
+        &self,
+        conv_range: std::ops::Range<usize>,
+        mut skip: F,
+    ) -> (Vec<u32>, Vec<WireImage>, Vec<(usize, usize)>)
+    where F: FnMut(&AstNode) -> bool,
+    {
         let mut tokens = Vec::new();
         let mut images = Vec::new();
+        let mut assistant_ranges = Vec::new();
+
+        for node in self.system() {
+            wire_into(node, &mut tokens, &mut images);
+        }
+        for node in self.identity() {
+            if skip(node) { continue; }
+            wire_into(node, &mut tokens, &mut images);
+        }
+        for node in self.journal() {
+            wire_into(node, &mut tokens, &mut images);
+        }
+        for node in &self.conversation()[conv_range] {
+            if skip(node) { continue; }
+            let start = tokens.len();
+            let is_asst = matches!(node, AstNode::Branch { role: Role::Assistant, .. });
+            wire_into(node, &mut tokens, &mut images);
+            if is_asst {
+                assistant_ranges.push((start, tokens.len()));
             }
         }
-        (tokens, images)
+        (tokens, images, assistant_ranges)
     }
 }
diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index bc62955..436dda3 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -294,7 +294,8 @@ impl Agent {
     pub async fn assemble_prompt(&self) -> (Vec<u32>, Vec<context::WireImage>) {
         let ctx = self.context.lock().await;
         let st = self.state.lock().await;
-        let (mut tokens, images) = ctx.wire_prompt();
+        let (mut tokens, images, _) =
+            ctx.wire_prompt(0..ctx.conversation().len(), |_| false);
         tokens.push(tokenizer::IM_START);
         if st.think_native {
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index 7137211..26c854b 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -15,95 +15,17 @@
 // hasn't internalized. 2 API calls.
 
 use crate::agent::api::ApiClient;
-use crate::agent::context::{AstNode, Ast, NodeBody, ContextState, Role};
+use crate::agent::context::{
+    Ast, AstNode, ContextState, Role, WireImage, is_memory_node, memory_key,
+};
 use crate::agent::tokenizer;
 
 const SCORE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(300);
 
-// ── Message building ────────────────────────────────────────────
-
-/// What to filter when building the message array for scoring.
-#[allow(dead_code)]
-enum Filter<'a> {
-    None,
-    SkipIndex(usize),
-    SkipKey(&'a str),
-    SkipAllMemories,
-}
-
-fn is_memory(node: &AstNode) -> bool {
-    matches!(node, AstNode::Leaf(leaf) if matches!(leaf.body(), NodeBody::Memory { .. }))
-}
-
-fn memory_key(node: &AstNode) -> Option<&str> {
-    match node {
-        AstNode::Leaf(leaf) => match leaf.body() {
-            NodeBody::Memory { key, .. } => Some(key),
-            _ => None,
-        },
-        _ => None,
-    }
-}
-
 fn is_assistant(node: &AstNode) -> bool {
     matches!(node, AstNode::Branch { role: Role::Assistant, .. })
 }
 
-/// Build a token ID array for a scoring call.
-///
-/// Includes all sections up to and including conversation entries in
-/// `range`, with `filter` applied to conversation entries.
-///
-/// Returns (token_ids, assistant_ranges) where assistant_ranges are
-/// (start, end) token positions for each assistant message.
-fn build_token_ids(
-    context: &ContextState,
-    range: std::ops::Range<usize>,
-    filter: Filter,
-) -> (Vec<u32>, Vec<(usize, usize)>) {
-    use crate::agent::context::Ast;
-    let mut ids = Vec::new();
-    let mut assistant_ranges = Vec::new();
-
-    for node in context.system() {
-        ids.extend(node.token_ids());
-    }
-    // Identity nodes can be filtered by key for scoring
-    for node in context.identity() {
-        let skip = match &filter {
-            Filter::SkipKey(key) => memory_key(node) == Some(*key),
-            Filter::SkipAllMemories => is_memory(node),
-            _ => false,
-        };
-        if !skip {
-            ids.extend(node.token_ids());
-        }
-    }
-    for node in context.journal() {
-        ids.extend(node.token_ids());
-    }
-    let entries = context.conversation();
-    for i in range {
-        let node = &entries[i];
-        let skip = match &filter {
-            Filter::None => false,
-            Filter::SkipIndex(idx) => i == *idx,
-            Filter::SkipKey(key) => memory_key(node) == Some(*key),
-            Filter::SkipAllMemories => is_memory(node),
-        };
-        if skip { continue; }
-
-        // Track assistant message boundaries
-        let is_asst = is_assistant(node);
-        let start = ids.len();
-        ids.extend(node.token_ids());
-        if is_asst {
-            assistant_ranges.push((start, ids.len()));
-        }
-    }
-    (ids, assistant_ranges)
-}
-
 // ── Score API ───────────────────────────────────────────────────
 
 #[derive(serde::Deserialize)]
@@ -126,6 +48,7 @@ async fn call_score(
     http: &crate::agent::api::http::HttpClient,
     client: &ApiClient,
     prompt: &[u32],
+    images: &[WireImage],
     ranges: &[(usize, usize)],
     priority: Option<i32>,
 ) -> anyhow::Result<Vec<ScoreResult>> {
@@ -141,6 +64,14 @@
         "score_ranges": ranges,
         "logprobs": 1,
    });
+    if !images.is_empty() {
+        use base64::Engine;
+        let b64 = base64::engine::general_purpose::STANDARD;
+        let uris: Vec<String> = images.iter()
+            .map(|img| format!("data:{};base64,{}", img.mime, b64.encode(&img.bytes)))
+            .collect();
+        body["multi_modal_data"] = serde_json::json!({ "image": uris });
+    }
     if let Some(p) = priority {
         body["priority"] = serde_json::json!(p);
     }
@@ -178,18 +109,24 @@ fn divergence(baseline: &[ScoreResult], without: &[ScoreResult]) -> Vec<f64> {
 }
 
 /// Score two message sets and return total divergence.
-async fn score_divergence(
+async fn score_divergence<F>(
     http: &crate::agent::api::http::HttpClient,
     client: &ApiClient,
     context: &ContextState,
     range: std::ops::Range<usize>,
-    filter: Filter<'_>,
+    skip: F,
     priority: Option<i32>,
-) -> anyhow::Result<(Vec<f64>, Vec<ScoreResult>)> {
-    let (baseline_tokens, baseline_ranges) = build_token_ids(context, range.clone(), Filter::None);
-    let (without_tokens, without_ranges) = build_token_ids(context, range, filter);
-    let baseline = call_score(http, client, &baseline_tokens, &baseline_ranges, priority).await?;
-    let without = call_score(http, client, &without_tokens, &without_ranges, priority).await?;
+) -> anyhow::Result<(Vec<f64>, Vec<ScoreResult>)>
+where F: FnMut(&AstNode) -> bool,
+{
+    let (baseline_tokens, baseline_images, baseline_ranges) =
+        context.wire_prompt(range.clone(), |_| false);
+    let (without_tokens, without_images, without_ranges) =
+        context.wire_prompt(range, skip);
+    let baseline = call_score(http, client, &baseline_tokens, &baseline_images,
+        &baseline_ranges, priority).await?;
+    let without = call_score(http, client, &without_tokens, &without_images,
+        &without_ranges, priority).await?;
     let divs = divergence(&baseline, &without);
     Ok((divs, baseline))
 }
@@ -228,21 +165,22 @@ pub async fn score_memories(
     let http = http_client();
 
     let activity = crate::agent::start_activity(agent, "scoring: baseline").await;
-    let (baseline_tokens, baseline_ranges) = {
+    let (baseline_tokens, baseline_images, baseline_ranges) = {
         let ctx = agent.context.lock().await;
-        build_token_ids(&ctx, 0..ctx.conversation().len(), Filter::None)
+        ctx.wire_prompt(0..ctx.conversation().len(), |_| false)
     };
-    let baseline = call_score(&http, client, &baseline_tokens, &baseline_ranges, Some(5)).await?;
+    let baseline = call_score(&http, client, &baseline_tokens, &baseline_images,
+        &baseline_ranges, Some(5)).await?;
     dbglog!("[scoring-full] baseline done ({} response scores)", baseline.len());
 
     for (mem_idx, key) in memory_keys.iter().enumerate() {
         activity.update(format!("scoring: {}/{}", mem_idx + 1, total)).await;
         dbglog!("[scoring-full] {}/{}: {}", mem_idx + 1, total, key);
-        let (tokens, ranges) = {
+        let (tokens, images, ranges) = {
             let ctx = agent.context.lock().await;
-            build_token_ids(&ctx, 0..ctx.conversation().len(), Filter::SkipKey(key))
+            ctx.wire_prompt(0..ctx.conversation().len(), |n| memory_key(n) == Some(key.as_str()))
         };
-        let row = match call_score(&http, client, &tokens, &ranges, Some(5)).await {
+        let row = match call_score(&http, client, &tokens, &images, &ranges, Some(5)).await {
             Ok(without) => {
                 let divs = divergence(&baseline, &without);
                 let max_div = divs.iter().cloned().fold(0.0f64, f64::max);
@@ -326,7 +264,8 @@ pub async fn score_memory(
     }
 
     let http = http_client();
-    let (divs, _) = score_divergence(&http, client, context, range, Filter::SkipKey(key), Some(5)).await?;
+    let (divs, _) = score_divergence(&http, client, context, range,
+        |n| memory_key(n) == Some(key), Some(5)).await?;
     Ok(divs.iter().sum())
 }
@@ -418,7 +357,8 @@
         }
         activity.update(format!("scoring: {}/{} {}", scored + 1, total, key)).await;
-        match score_divergence(&http, client, context, range, Filter::SkipKey(key), Some(5)).await {
+        match score_divergence(&http, client, context, range,
+            |n| memory_key(n) == Some(key), Some(5)).await {
             Ok((divs, _)) => {
                 let n_responses = divs.len();
                 let max_div = divs.iter().cloned().fold(0.0f64, f64::max);
@@ -464,7 +404,7 @@ pub async fn score_finetune(
     }
 
     let http = http_client();
-    let (divs, _) = score_divergence(&http, client, context, range, Filter::SkipAllMemories,
Some(5)).await?;
+    let (divs, _) = score_divergence(&http, client, context, range, is_memory_node, Some(5)).await?;
 
     let mut results: Vec<(usize, f64)> = response_positions.iter()
         .enumerate()

From c5745e38e2f345811e09506c700d53ad16ec663b Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Fri, 17 Apr 2026 15:20:02 -0400
Subject: [PATCH 41/94] subconscious: lift continuation gen + render helpers
 into shared homes

- context.rs gains is_assistant, render_branch_text,
  render_prior_context alongside memory_key / is_memory_node. They're
  pure AST helpers, used by both the finetune pipeline and the
  forthcoming compare screen.

- new subconscious/generate.rs holds gen_continuation(context,
  entry_idx, skip, client): build the prompt from a context prefix
  with an arbitrary skip predicate, send to the model, decode the
  completion. Takes both the predicate and the client so callers can
  aim it at memory-stripped contexts (finetune), same-context-
  different-model (F7 compare), or whatever else.

- learn.rs drops its private copies of those helpers and the inline
  generate_alternate; the finetune path now reads as
  gen_continuation(context, idx, is_memory_node, client).

Pure refactor, no behavior change.

Co-Authored-By: Proof of Concept
---
 src/agent/context.rs         | 47 +++++++++++++++++++
 src/subconscious/generate.rs | 46 ++++++++++++++++++
 src/subconscious/learn.rs    | 91 ++----------------------------------
 src/subconscious/mod.rs      |  1 +
 4 files changed, 98 insertions(+), 87 deletions(-)
 create mode 100644 src/subconscious/generate.rs

diff --git a/src/agent/context.rs b/src/agent/context.rs
index 38127d5..948e9f2 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -934,6 +934,53 @@ pub fn is_memory_node(node: &AstNode) -> bool {
     matches!(node, AstNode::Leaf(leaf) if matches!(leaf.body(), NodeBody::Memory { .. }))
 }
 
+pub fn is_assistant(node: &AstNode) -> bool {
+    matches!(node, AstNode::Branch { role: Role::Assistant, .. })
+}
+
+/// Concatenate the text of a Branch's Leaf children — what the model
+/// actually produced on that turn (Content + Thinking + ToolCall name).
+pub fn render_branch_text(children: &[AstNode]) -> String {
+    children.iter()
+        .filter_map(|c| match c {
+            AstNode::Leaf(leaf) => Some(leaf.body().text().to_string()),
+            _ => None,
+        })
+        .collect::<Vec<_>>()
+        .join("")
+}
+
+/// Render the last `max_msgs` user/assistant branches before `idx` as a
+/// review-friendly string with `[user]` / `[assistant]` markers.
+pub fn render_prior_context(entries: &[AstNode], idx: usize, max_msgs: usize) -> String {
+    let mut picked: Vec<&AstNode> = Vec::with_capacity(max_msgs);
+    for i in (0..idx).rev() {
+        if picked.len() >= max_msgs { break; }
+        if let AstNode::Branch { role, .. } = &entries[i] {
+            if matches!(role, Role::User | Role::Assistant) {
+                picked.push(&entries[i]);
+            }
+        }
+    }
+    picked.reverse();
+
+    let mut out = String::new();
+    for node in picked {
+        if let AstNode::Branch { role, children, .. } = node {
+            let marker = match role {
+                Role::User => "[user]",
+                Role::Assistant => "[assistant]",
+                _ => continue,
+            };
+            out.push_str(marker);
+            out.push('\n');
+            out.push_str(render_branch_text(children).trim());
+            out.push_str("\n\n");
+        }
+    }
+    out.trim_end().to_string()
+}
+
 impl ContextState {
     /// Assemble the prompt in wire form: token stream with a single
     /// `<|image_pad|>` per image (vLLM expands back to N), plus the list
diff --git a/src/subconscious/generate.rs b/src/subconscious/generate.rs
new file mode 100644
index 0000000..44f967a
--- /dev/null
+++ b/src/subconscious/generate.rs
@@ -0,0 +1,46 @@
+// generate.rs — Continuation generation for scoring / comparison flows.
+//
+// Shared by the finetune pipeline (learn.rs) and the compare screen:
+// given a context prefix and a skip predicate, generate what the model
+// would say as the next assistant turn.
+
+use crate::agent::api::{ApiClient, SamplingParams, StreamToken};
+use crate::agent::context::{AstNode, ContextState};
+use crate::agent::tokenizer;
+
+/// Generate an assistant continuation from the context up to `entry_idx`,
+/// with `skip` applied to identity + conversation entries during prompt
+/// assembly. The model is whichever `client` points at — the default
+/// runtime client for memory-ablation alternates, a test-model client
+/// for F7 comparison.
+pub async fn gen_continuation<F>(
+    context: &ContextState,
+    entry_idx: usize,
+    skip: F,
+    client: &ApiClient,
+) -> anyhow::Result<String>
+where F: FnMut(&AstNode) -> bool,
+{
+    let (mut prompt, images, _) = context.wire_prompt(0..entry_idx, skip);
+
+    prompt.push(tokenizer::IM_START);
+    prompt.extend(tokenizer::encode("assistant\n"));
+
+    let sampling = SamplingParams {
+        temperature: 0.6,
+        top_p: 0.95,
+        top_k: 20,
+    };
+    let (mut rx, _guard) = client.stream_completion_mm(&prompt, &images, sampling, Some(-5));
+
+    let mut tokens = Vec::new();
+    while let Some(tok) = rx.recv().await {
+        match tok {
+            StreamToken::Token(id) => tokens.push(id),
+            StreamToken::Done { ..
} => break, + StreamToken::Error(e) => anyhow::bail!("generation error: {}", e), + } + } + + Ok(tokenizer::decode(&tokens)) +} diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs index 26c854b..b7656bf 100644 --- a/src/subconscious/learn.rs +++ b/src/subconscious/learn.rs @@ -16,16 +16,13 @@ use crate::agent::api::ApiClient; use crate::agent::context::{ - Ast, AstNode, ContextState, Role, WireImage, is_memory_node, memory_key, + Ast, AstNode, ContextState, Role, WireImage, + is_assistant, is_memory_node, memory_key, render_branch_text, render_prior_context, }; -use crate::agent::tokenizer; +use crate::subconscious::generate::gen_continuation; const SCORE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(300); -fn is_assistant(node: &AstNode) -> bool { - matches!(node, AstNode::Branch { role: Role::Assistant, .. }) -} - // ── Score API ─────────────────────────────────────────────────── #[derive(serde::Deserialize)] @@ -414,50 +411,6 @@ pub async fn score_finetune( Ok(results) } -/// Concatenate the text of a Branch's Leaf children — what the model -/// actually produced on that turn (Content + Thinking + ToolCall name). -fn render_branch_text(children: &[AstNode]) -> String { - children.iter() - .filter_map(|c| match c { - AstNode::Leaf(leaf) => Some(leaf.body().text().to_string()), - _ => None, - }) - .collect::>() - .join("") -} - -/// Render the last `max_msgs` user/assistant branches before `idx` as a -/// review-friendly string with `[user]` / `[assistant]` markers. -fn render_prior_context(entries: &[AstNode], idx: usize, max_msgs: usize) -> String { - use crate::agent::context::Role; - let mut picked: Vec<&AstNode> = Vec::with_capacity(max_msgs); - for i in (0..idx).rev() { - if picked.len() >= max_msgs { break; } - if let AstNode::Branch { role, .. } = &entries[i] { - if matches!(role, Role::User | Role::Assistant) { - picked.push(&entries[i]); - } - } - } - picked.reverse(); - - let mut out = String::new(); - for node in picked { - if let AstNode::Branch { role, children, .. } = node { - let marker = match role { - Role::User => "[user]", - Role::Assistant => "[assistant]", - _ => continue, - }; - out.push_str(marker); - out.push('\n'); - out.push_str(render_branch_text(children).trim()); - out.push_str("\n\n"); - } - } - out.trim_end().to_string() -} - /// Enriched finetune candidate with context for review. #[derive(Clone, Debug)] pub struct FinetuneCandidate { @@ -556,7 +509,7 @@ pub async fn score_finetune_candidates( activity.update( format!("finetune: generating alternate {}/{}", i + 1, total) ).await; - match generate_alternate(context, candidate.entry_idx, client).await { + match gen_continuation(context, candidate.entry_idx, is_memory_node, client).await { Ok(text) => candidate.alternate_text = Some(text), Err(e) => dbglog!("[finetune] alternate generation failed: {:#}", e), } @@ -567,42 +520,6 @@ pub async fn score_finetune_candidates( Ok((total, max_divergence)) } -/// Generate what the model would say without memories for a given entry. 
-async fn generate_alternate( - context: &ContextState, - entry_idx: usize, - client: &ApiClient, -) -> anyhow::Result { - use crate::agent::api::{SamplingParams, StreamToken}; - - // Build context tokens without memories, up to the response - let (mut prompt, images, _) = - context.wire_prompt(0..entry_idx, is_memory_node); - - // Add assistant turn start - prompt.push(tokenizer::IM_START); - prompt.extend(tokenizer::encode("assistant\n")); - - // Generate completion - let sampling = SamplingParams { - temperature: 0.6, - top_p: 0.95, - top_k: 20, - }; - let (mut rx, _guard) = client.stream_completion_mm(&prompt, &images, sampling, Some(-5)); - - let mut tokens = Vec::new(); - while let Some(tok) = rx.recv().await { - match tok { - StreamToken::Token(id) => tokens.push(id), - StreamToken::Done { .. } => break, - StreamToken::Error(e) => anyhow::bail!("generation error: {}", e), - } - } - - Ok(tokenizer::decode(&tokens)) -} - // ── Finetune config and persistence ───────────────────────────── use std::path::PathBuf; diff --git a/src/subconscious/mod.rs b/src/subconscious/mod.rs index 433f721..d50f833 100644 --- a/src/subconscious/mod.rs +++ b/src/subconscious/mod.rs @@ -3,5 +3,6 @@ pub mod daemon; pub mod defs; pub mod digest; +pub mod generate; pub mod learn; pub mod prompts; From 575325e85541212016909804346a86e6456f5a1e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 17 Apr 2026 15:57:23 -0400 Subject: [PATCH 42/94] mind: MindTriggered trait for background scoring flows MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mind's impl had accumulated ~50 lines of setup glue per scoring flow (memory, memory-full, finetune): snapshot config, clone handles, resolve context, spawn task, route results back through BgEvent, write stats. The shape was identical; only the middle changed. Introduce the MindTriggered trait: pub trait MindTriggered { fn trigger(&self); } Each flow becomes a struct next to its scoring code that owns its dependencies and a JoinHandle (behind a sync Mutex for interior mutability): subconscious::learn::MemoryScoring (Score, ScoreFull) subconscious::learn::FinetuneScoring (ScoreFinetune) Mind holds one of each and dispatches in one line: MindCommand::Score => self.memory_scoring.trigger(), MindCommand::ScoreFull => self.memory_scoring.trigger_full(), MindCommand::ScoreFinetune => self.finetune_scoring.trigger(), Each struct picks its own trigger semantics — memory scoring is no-op-if-running (!handle.is_finished()); finetune is abort-restart. Falls out: - BgEvent / bg_tx / bg_rx disappear entirely. Tasks write directly to their slice of MindState and call agent.state.changed.notify_one() to wake the UI. The bg_rx arm in Mind's select loop is gone. - agent.state.memory_scoring_in_flight was duplicating shared.scoring_in_flight via BgEvent routing; now the JoinHandle alone tells us, and shared.scoring_in_flight is written directly by the task for the UI. - start_memory_scoring / start_full_scoring / start_finetune_scoring methods on Mind are deleted; Mind no longer knows the setup shape of any scoring flow. - FinetuneScoringStats moves from mind/ to subconscious/learn.rs next to the function that produces it. No behavior change — same flows, same trigger points, same semantics. 
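Shape of a flow under this trait, roughly (illustrative sketch only — a
made-up CompareFlow, not one of the shipped flows; it reuses the
TaskHandle and notify path shown above):

    struct CompareFlow {
        agent: std::sync::Arc<Agent>,
        handle: TaskHandle,
    }

    impl MindTriggered for CompareFlow {
        fn trigger(&self) {
            let agent = self.agent.clone();
            // abort-restart semantics, like FinetuneScoring
            self.handle.trigger(async move {
                // ... do the work, then wake the UI directly:
                agent.state.lock().await.changed.notify_one();
            });
        }
    }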
Co-Authored-By: Proof of Concept
---
 src/agent/mod.rs          |   3 -
 src/mind/mod.rs           | 287 ++++++++------------------------------
 src/subconscious/learn.rs | 199 ++++++++++++++++++++++++++
 src/user/mod.rs           |   1 +
 4 files changed, 258 insertions(+), 232 deletions(-)

diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index 436dda3..703c65c 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -172,7 +172,6 @@ pub struct AgentState {
     pub pending_dmn_pause: bool,
     pub provenance: String,
     pub generation: u64,
-    pub memory_scoring_in_flight: bool,
     pub active_tools: tools::ActiveTools,
     /// vLLM scheduling priority (lower = higher priority).
     /// 0 = interactive, 1 = surface agent, 2 = other subconscious, 10 = unconscious.
@@ -237,7 +236,6 @@ impl Agent {
             pending_dmn_pause: false,
             provenance: "manual".to_string(),
             generation: 0,
-            memory_scoring_in_flight: false,
             active_tools,
             priority: Some(0),
             no_compact: false,
@@ -275,7 +273,6 @@ impl Agent {
             pending_dmn_pause: false,
             provenance: st.provenance.clone(),
             generation: 0,
-            memory_scoring_in_flight: false,
             active_tools: tools::ActiveTools::new(),
             priority: None,
             no_compact: true,
diff --git a/src/mind/mod.rs b/src/mind/mod.rs
index 474e2c2..4ca97ea 100644
--- a/src/mind/mod.rs
+++ b/src/mind/mod.rs
@@ -9,6 +9,44 @@ pub mod unconscious;
 pub mod identity;
 pub mod log;
 
+/// A background operation wired off Mind. Each flow (memory scoring,
+/// finetune scoring, compare) is a struct holding its dependencies and
+/// a TaskHandle; `trigger()` picks the flow's own "start a fresh run"
+/// semantics (abort-restart vs no-op-if-running).
+pub trait MindTriggered {
+    fn trigger(&self);
+}
+
+/// Owns a JoinHandle for a background task with two trigger semantics.
+/// Uses a sync Mutex for interior mutability so callers can `trigger()`
+/// off `&self` (Mind is shared via Arc).
+#[derive(Default)]
+pub struct TaskHandle(std::sync::Mutex<Option<tokio::task::JoinHandle<()>>>);
+
+impl TaskHandle {
+    pub fn new() -> Self { Self::default() }
+
+    /// Abort any running task and start a fresh one.
+    pub fn trigger<F>(&self, fut: F)
+    where F: std::future::Future<Output = ()> + Send + 'static
+    {
+        let mut h = self.0.lock().unwrap();
+        if let Some(old) = h.take() { old.abort(); }
+        *h = Some(tokio::spawn(fut));
+    }
+
+    /// No-op if a task is still running; otherwise start a fresh one.
+    pub fn trigger_if_idle<F>(&self, fut: F)
+    where F: std::future::Future<Output = ()> + Send + 'static
+    {
+        let mut h = self.0.lock().unwrap();
+        if let Some(old) = &*h {
+            if !old.is_finished() { return; }
+        }
+        *h = Some(tokio::spawn(fut));
+    }
+}
+
 // consciousness.rs — Mind state machine and event loop
 //
 // The core runtime for the consciousness binary. Mind manages turns,
@@ -48,7 +86,7 @@ fn match_scores(
         }).collect()
 }
 
-fn find_memory_by_key(ctx: &ContextState, key: &str) -> Option<(Section, usize)> {
+pub(crate) fn find_memory_by_key(ctx: &ContextState, key: &str) -> Option<(Section, usize)> {
     [(Section::Identity, ctx.identity()), (Section::Conversation, ctx.conversation())]
         .into_iter()
         .find_map(|(section, nodes)| {
@@ -87,7 +125,7 @@ fn load_memory_scores(ctx: &mut ContextState, path: &std::path::Path) {
 }
 
 /// Collect scored memory keys from identity and conversation entries.
-fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTreeMap<String, f64> {
+pub(crate) fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTreeMap<String, f64> {
     ctx.identity().iter()
         .chain(ctx.conversation().iter())
         .filter_map(|node| {
@@ -102,7 +140,7 @@
     }
 }
 
-fn save_memory_scores(scores: &std::collections::BTreeMap<String, f64>, path: &std::path::Path) {
+pub(crate) fn save_memory_scores(scores: &std::collections::BTreeMap<String, f64>, path: &std::path::Path) {
     match serde_json::to_string_pretty(scores) {
         Ok(json) => match std::fs::write(path, &json) {
             Ok(()) => dbglog!("[scoring] saved {} scores to {} ({} bytes)",
@@ -154,22 +192,7 @@ pub struct MindState {
     /// Fine-tuning candidates identified by scoring.
     pub finetune_candidates: Vec<learn::FinetuneCandidate>,
     /// Last scoring run stats for UI display.
-    pub finetune_last_run: Option<FinetuneScoringStats>,
-}
-
-/// Stats from the last finetune scoring run.
-#[derive(Clone, Debug)]
-pub struct FinetuneScoringStats {
-    /// Count of assistant responses we considered (recent half of context).
-    pub responses_considered: usize,
-    /// How many exceeded the divergence threshold.
-    pub above_threshold: usize,
-    /// Threshold used for this run.
-    pub threshold: f64,
-    /// Highest divergence observed.
-    pub max_divergence: f64,
-    /// Error message if the run failed.
-    pub error: Option<String>,
+    pub finetune_last_run: Option<learn::FinetuneScoringStats>,
 }
 
 impl Clone for MindState {
@@ -318,11 +341,6 @@ impl MindState {
     }
 }
 
-/// Background task completion events.
-enum BgEvent {
-    ScoringDone,
-    FinetuneCandidate(learn::FinetuneCandidate),
-}
 
 // --- Mind: cognitive state machine ---
 
@@ -339,8 +357,8 @@ pub struct Mind {
     /// Signals conscious activity to the unconscious loop.
     /// true = active, false = idle opportunity.
     conscious_active: tokio::sync::watch::Sender<bool>,
-    bg_tx: mpsc::UnboundedSender<BgEvent>,
-    bg_rx: std::sync::Mutex<Option<mpsc::UnboundedReceiver<BgEvent>>>,
+    memory_scoring: learn::MemoryScoring,
+    finetune_scoring: learn::FinetuneScoring,
     _supervisor: crate::thalamus::supervisor::Supervisor,
 }
 
@@ -380,7 +398,6 @@ impl Mind {
         )));
         let (turn_watch, _) = tokio::sync::watch::channel(false);
         let (conscious_active, _) = tokio::sync::watch::channel(false);
-        let (bg_tx, bg_rx) = mpsc::unbounded_channel();
 
         let mut sup = crate::thalamus::supervisor::Supervisor::new();
         sup.load_config();
@@ -465,10 +482,17 @@ impl Mind {
             });
         }
 
+        let scores_path = config.session_dir.join("memory-scores.json");
+        let memory_scoring = learn::MemoryScoring::new(
+            agent.clone(), shared.clone(), scores_path);
+        let finetune_scoring = learn::FinetuneScoring::new(agent.clone(), shared.clone());
+
         Self { agent, shared, config, subconscious, unconscious,
-               turn_tx, turn_watch, conscious_active, bg_tx,
-               bg_rx: std::sync::Mutex::new(Some(bg_rx)), _supervisor: sup }
+               turn_tx, turn_watch, conscious_active,
+               memory_scoring,
+               finetune_scoring,
+               _supervisor: sup }
     }
 
     /// Initialize — restore log, start daemons and background agents.
@@ -513,14 +537,7 @@ impl Mind {
 
         // Kick off an incremental scoring pass on startup so memories due
         // for re-scoring get evaluated without requiring a user message.
-        {
-            let mut s = self.shared.lock().unwrap();
-            if !s.scoring_in_flight {
-                s.scoring_in_flight = true;
-                drop(s);
-                self.start_memory_scoring();
-            }
-        }
+        self.memory_scoring.trigger();
     }
 
     pub fn turn_watch(&self) -> tokio::sync::watch::Receiver<bool> {
@@ -540,24 +557,10 @@
             }
         }
         MindCommand::Score => {
-            let mut s = self.shared.lock().unwrap();
-            if !s.scoring_in_flight {
-                s.scoring_in_flight = true;
-                drop(s);
-                self.start_memory_scoring();
-            } else {
-                dbglog!("[scoring] skipped: scoring_in_flight=true");
-            }
+            self.memory_scoring.trigger();
         }
         MindCommand::ScoreFull => {
-            let mut s = self.shared.lock().unwrap();
-            if !s.scoring_in_flight {
-                s.scoring_in_flight = true;
-                drop(s);
-                self.start_full_scoring();
-            } else {
-                dbglog!("[scoring-full] skipped: scoring_in_flight=true");
-            }
+            self.memory_scoring.trigger_full();
         }
         MindCommand::Interrupt => {
             self.shared.lock().unwrap().interrupt();
@@ -588,7 +591,7 @@
             self.agent.compact().await;
         }
         MindCommand::ScoreFinetune => {
-            self.start_finetune_scoring();
+            self.finetune_scoring.trigger();
        }
         MindCommand::SetLearnThreshold(value) => {
             if let Err(e) = crate::config_writer::set_learn_threshold(value) {
@@ -605,167 +608,6 @@
         }
     }
 
-    pub fn start_memory_scoring(&self) {
-        let agent = self.agent.clone();
-        let bg_tx = self.bg_tx.clone();
-        let scores_path = self.config.session_dir.join("memory-scores.json");
-        let cfg = crate::config::get();
-        let max_age = cfg.scoring_interval_secs;
-        let response_window = cfg.scoring_response_window;
-        tokio::spawn(async move {
-            let (context, client) = {
-                let mut st = agent.state.lock().await;
-                if st.memory_scoring_in_flight {
-                    dbglog!("[scoring] skipped: memory_scoring_in_flight=true");
-                    return;
-                }
-                st.memory_scoring_in_flight = true;
-                drop(st);
-                let ctx = agent.context.lock().await.clone();
-                (ctx, agent.client.clone())
-            };
-            let _result = learn::score_memories_incremental(
-                &context, max_age as i64, response_window, &client, &agent,
-                |key: String, score: f64| {
-                    let agent = agent.clone();
-                    let path = scores_path.clone();
-                    async move {
-                        let scores_snapshot = {
-                            let mut ctx = agent.context.lock().await;
-                            // Find memory by key in identity or conversation
-                            let found = find_memory_by_key(&ctx, &key);
-                            match found {
-                                Some((section, i)) => {
-                                    ctx.set_score(section, i, Some(score));
-                                    let nodes: &[crate::agent::context::AstNode] = match section {
-                                        Section::Identity => ctx.identity(),
-                                        Section::Conversation => ctx.conversation(),
-                                        _ => &[],
-                                    };
-                                    let read_back = match nodes.get(i) {
-                                        Some(crate::agent::context::AstNode::Leaf(l)) => match l.body() {
-                                            crate::agent::context::NodeBody::Memory { score, ..
} => format!("{:?}", score), - _ => "not-memory".to_string(), - }, - _ => "out-of-bounds".to_string(), - }; - dbglog!("[scoring] persisted {} → {:.3} ({:?}[{}]) read_back={}", - key, score, section, i, read_back); - } - None => { - dbglog!( - "[scoring] DROP {}: find_memory_by_key None (id={}, cv={})", - key, ctx.identity().len(), ctx.conversation().len() - ); - } - } - let snapshot = collect_memory_scores(&ctx); - let in_snapshot = snapshot.contains_key(&key); - dbglog!("[scoring] snapshot size={} contains({})={}", - snapshot.len(), key, in_snapshot); - drop(ctx); - agent.state.lock().await.changed.notify_one(); - snapshot - }; - dbglog!("[scoring] about to save {} entries", scores_snapshot.len()); - save_memory_scores(&scores_snapshot, &path); - } - }, - ).await; - { - agent.state.lock().await.memory_scoring_in_flight = false; - } - let _ = bg_tx.send(BgEvent::ScoringDone); - }); - } - - /// Run full N×M scoring matrix — scores every memory against every response. - pub fn start_full_scoring(&self) { - let agent = self.agent.clone(); - let bg_tx = self.bg_tx.clone(); - tokio::spawn(async move { - { - let mut st = agent.state.lock().await; - if st.memory_scoring_in_flight { - dbglog!("[scoring-full] skipped: memory_scoring_in_flight=true"); - return; - } - st.memory_scoring_in_flight = true; - } - let client = agent.client.clone(); - match learn::score_memories(&client, &agent).await { - Ok(()) => { let _ = bg_tx.send(BgEvent::ScoringDone); } - Err(e) => { dbglog!("[scoring-full] FAILED: {:#}", e); } - } - agent.state.lock().await.memory_scoring_in_flight = false; - }); - } - - /// Score responses for fine-tuning candidates. - /// - /// Scores the most recent half of the context — responses near the end - /// of the context window were generated with the most context available, - /// which is what we want to train on. The threshold is a temporary knob; - /// once this runs continuously, we'll just train whatever lands at full - /// context without filtering. - pub fn start_finetune_scoring(&self) { - // Snapshot the config values we need before spawning — the scoring - // task shouldn't hold the config read lock across async work. - let (threshold, gen_alternates) = { - let app = crate::config::app(); - (app.learn.threshold, app.learn.generate_alternates) - }; - // Clear the previous run's candidates so this run's stream is fresh. - self.shared.lock().unwrap().finetune_candidates.clear(); - - let agent = self.agent.clone(); - let bg_tx = self.bg_tx.clone(); - let shared = self.shared.clone(); - tokio::spawn(async move { - let activity = crate::agent::start_activity(&agent, "finetune: scoring...").await; - - let (context, client) = { - let ctx = agent.context.lock().await; - (ctx.clone(), agent.client.clone()) - }; - - let entries = context.conversation(); - let score_count = entries.len() / 2; - let range_start = entries.len() - score_count; - let responses_considered: usize = entries[range_start..].iter() - .filter(|n| matches!(n, crate::agent::context::AstNode::Branch { role: crate::agent::context::Role::Assistant, .. 
}))
-                .count();
-
-            activity.update(format!("finetune: scoring {} responses...", responses_considered)).await;
-
-            let bg_tx_cb = bg_tx.clone();
-            let stats = match learn::score_finetune_candidates(
-                &context, score_count, &client, threshold,
-                gen_alternates, &activity,
-                |c| { let _ = bg_tx_cb.send(BgEvent::FinetuneCandidate(c)); },
-            ).await {
-                Ok((above_threshold, max_div)) => {
-                    FinetuneScoringStats {
-                        responses_considered,
-                        above_threshold,
-                        threshold,
-                        max_divergence: max_div,
-                        error: None,
-                    }
-                }
-                Err(e) => FinetuneScoringStats {
-                    responses_considered,
-                    above_threshold: 0,
-                    threshold,
-                    max_divergence: 0.0,
-                    error: Some(format!("{}", e)),
-                },
-            };
-
-            shared.lock().unwrap().finetune_last_run = Some(stats);
-            // activity drops here, marking completion and notifying observers
-        });
-    }
 
     async fn start_turn(&self, text: &str, target: StreamTarget) {
         {
@@ -828,13 +670,11 @@
             }
         });
 
-        let mut bg_rx = self.bg_rx.lock().unwrap().take()
-            .expect("Mind::run() called twice");
         let mut sub_handle: Option<tokio::task::JoinHandle<()>> = None;
 
         // Start finetune scoring at startup (scores existing conversation)
         if !self.config.no_agents {
-            self.start_finetune_scoring();
+            self.finetune_scoring.trigger();
        }
 
        loop {
@@ -857,17 +697,6 @@
                 }
             }
 
-            Some(bg) = bg_rx.recv() => {
-                match bg {
-                    BgEvent::ScoringDone => {
-                        self.shared.lock().unwrap().scoring_in_flight = false;
-                    }
-                    BgEvent::FinetuneCandidate(c) => {
-                        self.shared.lock().unwrap().finetune_candidates.push(c);
-                    }
-                }
-            }
-
             Some((result, target)) = turn_rx.recv() => {
                 let _ = self.conscious_active.send(false);
                 let model_switch = {
diff --git a/src/subconscious/learn.rs b/src/subconscious/learn.rs
index b7656bf..3021fc3 100644
--- a/src/subconscious/learn.rs
+++ b/src/subconscious/learn.rs
@@ -14,11 +14,14 @@
 // with high divergence depend on memories the model
 // hasn't internalized. 2 API calls.
 
+use std::sync::Arc;
+
 use crate::agent::api::ApiClient;
 use crate::agent::context::{
     Ast, AstNode, ContextState, Role, WireImage,
     is_assistant, is_memory_node, memory_key,
     render_branch_text, render_prior_context,
 };
+use crate::mind::{MindState, MindTriggered, TaskHandle};
 use crate::subconscious::generate::gen_continuation;
 
 const SCORE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(300);
@@ -376,6 +379,108 @@ where
     Ok(scored)
 }
 
+/// Memory scoring — two modes sharing an in-flight handle (only one
+/// runs at a time): `trigger()` for incremental, `trigger_full()` for
+/// the N×M debug matrix.
+pub struct MemoryScoring {
+    agent: Arc<Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+    scores_path: std::path::PathBuf,
+    task: TaskHandle,
+}
+
+impl MemoryScoring {
+    pub fn new(
+        agent: Arc<Agent>,
+        shared: Arc<std::sync::Mutex<MindState>>,
+        scores_path: std::path::PathBuf,
+    ) -> Self {
+        Self { agent, shared, scores_path, task: TaskHandle::new() }
+    }
+
+    pub fn trigger_full(&self) {
+        self.task.trigger_if_idle(run_full(self.agent.clone(), self.shared.clone()));
+    }
+}
+
+impl MindTriggered for MemoryScoring {
+    fn trigger(&self) {
+        self.task.trigger_if_idle(run_incremental(
+            self.agent.clone(), self.shared.clone(), self.scores_path.clone(),
+        ));
+    }
+}
+
+async fn run_incremental(
+    agent: Arc<Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+    scores_path: std::path::PathBuf,
+) {
+    shared.lock().unwrap().scoring_in_flight = true;
+    agent.state.lock().await.changed.notify_one();
+
+    let cfg = crate::config::get();
+    let max_age = cfg.scoring_interval_secs;
+    let response_window = cfg.scoring_response_window;
+
+    let (context, client) = {
+        let ctx = agent.context.lock().await.clone();
+        (ctx, agent.client.clone())
+    };
+
+    let _result = score_memories_incremental(
+        &context, max_age as i64, response_window, &client, &agent,
+        |key: String, score: f64| {
+            let agent = agent.clone();
+            let path = scores_path.clone();
+            async move {
+                let scores_snapshot = {
+                    let mut ctx = agent.context.lock().await;
+                    let found = crate::mind::find_memory_by_key(&ctx, &key);
+                    match found {
+                        Some((section, i)) => {
+                            ctx.set_score(section, i, Some(score));
+                            dbglog!("[scoring] persisted {} → {:.3} ({:?}[{}])",
+                                key, score, section, i);
+                        }
+                        None => {
+                            dbglog!(
+                                "[scoring] DROP {}: find_memory_by_key None (id={}, cv={})",
+                                key, ctx.identity().len(), ctx.conversation().len()
+                            );
+                        }
+                    }
+                    let snapshot = crate::mind::collect_memory_scores(&ctx);
+                    drop(ctx);
+                    agent.state.lock().await.changed.notify_one();
+                    snapshot
+                };
+                crate::mind::save_memory_scores(&scores_snapshot, &path);
+            }
+        },
+    ).await;
+
+    shared.lock().unwrap().scoring_in_flight = false;
+    agent.state.lock().await.changed.notify_one();
+}
+
+async fn run_full(
+    agent: Arc<Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+) {
+    shared.lock().unwrap().scoring_in_flight = true;
+    agent.state.lock().await.changed.notify_one();
+
+    let client = agent.client.clone();
+    match score_memories(&client, &agent).await {
+        Ok(()) => {},
+        Err(e) => { dbglog!("[scoring-full] FAILED: {:#}", e); }
    }
+
+    shared.lock().unwrap().scoring_in_flight = false;
+    agent.state.lock().await.changed.notify_one();
+}
+
 // ── Fine-tuning scoring ─────────────────────────────────────────
 
 /// Score which recent responses are candidates for fine-tuning.
@@ -520,6 +625,100 @@ pub async fn score_finetune_candidates(
     Ok((total, max_divergence))
 }
 
+/// Stats from a finetune scoring run. Stored on MindState for UI display.
+#[derive(Clone, Debug)]
+pub struct FinetuneScoringStats {
+    pub responses_considered: usize,
+    pub above_threshold: usize,
+    pub threshold: f64,
+    pub max_divergence: f64,
+    pub error: Option<String>,
+}
+
+/// Finetune scoring — `trigger()` aborts any in-flight run and starts
+/// a fresh one, clearing the previous candidates.
+pub struct FinetuneScoring {
+    agent: Arc<Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+    task: TaskHandle,
+}
+
+impl FinetuneScoring {
+    pub fn new(
+        agent: Arc<Agent>,
+        shared: Arc<std::sync::Mutex<MindState>>,
+    ) -> Self {
+        Self { agent, shared, task: TaskHandle::new() }
+    }
+}
+
+impl MindTriggered for FinetuneScoring {
+    fn trigger(&self) {
+        self.task.trigger(run_finetune(self.agent.clone(), self.shared.clone()));
+    }
+}
+
+async fn run_finetune(
+    agent: Arc<Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+) {
+    let (threshold, gen_alternates) = {
+        let app = crate::config::app();
+        (app.learn.threshold, app.learn.generate_alternates)
+    };
+
+    // Fresh run — clear previous candidates.
+    shared.lock().unwrap().finetune_candidates.clear();
+    agent.state.lock().await.changed.notify_one();
+
+    let activity = crate::agent::start_activity(&agent, "finetune: scoring...").await;
+
+    let (context, client) = {
+        let ctx = agent.context.lock().await;
+        (ctx.clone(), agent.client.clone())
+    };
+
+    let entries = context.conversation();
+    let score_count = entries.len() / 2;
+    let range_start = entries.len() - score_count;
+    let responses_considered: usize = entries[range_start..].iter()
+        .filter(|n| matches!(n, AstNode::Branch { role: Role::Assistant, .. }))
+        .count();
+
+    activity.update(format!("finetune: scoring {} responses...", responses_considered)).await;
+
+    let stats = {
+        let shared = shared.clone();
+        let agent = agent.clone();
+        match score_finetune_candidates(
+            &context, score_count, &client, threshold,
+            gen_alternates, &activity,
+            move |c| {
+                shared.lock().unwrap().finetune_candidates.push(c);
+                if let Ok(st) = agent.state.try_lock() { st.changed.notify_one(); }
+            },
+        ).await {
+            Ok((above_threshold, max_div)) => FinetuneScoringStats {
+                responses_considered,
+                above_threshold,
+                threshold,
+                max_divergence: max_div,
+                error: None,
+            },
+            Err(e) => FinetuneScoringStats {
+                responses_considered,
+                above_threshold: 0,
+                threshold,
+                max_divergence: 0.0,
+                error: Some(format!("{}", e)),
+            },
        }
+    };
+
+    shared.lock().unwrap().finetune_last_run = Some(stats);
+    agent.state.lock().await.changed.notify_one();
+}
+
 // ── Finetune config and persistence ─────────────────────────────
 
 use std::path::PathBuf;
diff --git a/src/user/mod.rs b/src/user/mod.rs
index 93da72c..e077167 100644
--- a/src/user/mod.rs
+++ b/src/user/mod.rs
@@ -504,6 +504,7 @@ async fn run(
                 keep
             });
         }
+        app.mind_state = Some(ms.clone());
     }
     app.walked_count = mind.subconscious_walked().await.len();

From 2b03dbb20006b15f19b96a2f911a8fd0de934b07 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Fri, 17 Apr 2026 16:01:11 -0400
Subject: [PATCH 43/94] user: F7 compare screen
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Side-by-side model comparison against the current conversation context.
Built on the MindTriggered pattern — F7 drops in as one more
CompareScoring flow next to MemoryScoring / FinetuneScoring.

Motivation: we have the VRAM on the b200 to load two versions of the
same family simultaneously (e.g. Qwen3.5 27B bf16 and q8_k_xl). Rather
than trust perplexity/KLD numbers on a generic corpus, we can measure
divergence on our actual conversations: for each assistant response,
ask the test model what it would have said given the same prefix, and
eyeball the diffs.

- config.compare.test_backend — names an entry in the existing
  backends map to use as the test model. Empty = F7 reports "(unset)"
  and does nothing.

- subconscious::compare::{score_compare_candidates, CompareCandidate,
  CompareScoringStats, CompareScoring}.
  For each assistant response, gen_continuation runs with the test
  client against the same prefix the original response saw; pairs
  stream into shared.compare_candidates as they complete.

- user::compare::CompareScreen — F7 in the screen list. c/Enter
  triggers a run; list/detail layout mirroring F6, detail shows prior
  context / original / test-model alternate.

No persistence yet — each F7 run regenerates. Caching via a context
manifest (so we can re-view without re-burning generation) is the
natural follow-up; for now light usage is fine.

Also reusable later for validating finetune checkpoints: same pattern,
swap the test backend for the new checkpoint, watch where it diverges
from the base.

Co-Authored-By: Proof of Concept
---
 src/config.rs               |  13 ++++
 src/mind/mod.rs             |  20 ++++-
 src/subconscious/compare.rs | 109 +++++++++++++++++++++++++++
 src/subconscious/mod.rs     |   1 +
 src/user/compare.rs         | 142 ++++++++++++++++++++++++++++++++++++
 src/user/learn.rs           |  10 +--
 src/user/mod.rs             |  17 ++++-
 7 files changed, 301 insertions(+), 11 deletions(-)
 create mode 100644 src/subconscious/compare.rs
 create mode 100644 src/user/compare.rs

diff --git a/src/config.rs b/src/config.rs
index 6323aae..209bdc1 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -250,6 +250,8 @@ pub struct AppConfig {
     #[serde(default)]
     pub learn: LearnConfig,
     #[serde(default)]
+    pub compare: CompareConfig,
+    #[serde(default)]
     pub mcp_servers: Vec,
     #[serde(default)]
     pub lsp_servers: Vec,
@@ -323,6 +325,16 @@ impl Default for LearnConfig {
     }
 }
 
+/// Settings for the F7 compare screen — side-by-side generation with a
+/// test model against the current context.
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct CompareConfig {
+    /// Backend name (looked up in `backends`) to use as the test model.
+    /// Empty = F7 reports "no test backend configured" and does nothing.
+    #[serde(default)]
+    pub test_backend: String,
+}
+
 fn default_user_name() -> String { "User".into() }
 fn default_assistant_name() -> String { "Assistant".into() }
 
@@ -340,6 +352,7 @@ impl Default for AppConfig {
             },
             dmn: DmnConfig { max_turns: 20 },
             learn: LearnConfig::default(),
+            compare: CompareConfig::default(),
             mcp_servers: Vec::new(),
             lsp_servers: Vec::new(),
         }
     }
diff --git a/src/mind/mod.rs b/src/mind/mod.rs
index 4ca97ea..f526b10 100644
--- a/src/mind/mod.rs
+++ b/src/mind/mod.rs
@@ -63,7 +63,7 @@
 use tokio::sync::mpsc;
 
 use crate::agent::{Agent, TurnResult};
 use crate::agent::api::ApiClient;
 use crate::config::{AppConfig, SessionConfig};
-use crate::subconscious::learn;
+use crate::subconscious::{compare, learn};
 use crate::hippocampus::access_local;
 
 pub use subconscious::{SubconsciousSnapshot, Subconscious};
@@ -193,6 +193,11 @@ pub struct MindState {
     pub finetune_candidates: Vec<learn::FinetuneCandidate>,
     /// Last scoring run stats for UI display.
     pub finetune_last_run: Option<learn::FinetuneScoringStats>,
+    /// F7 compare candidates — one per response, showing what the test
+    /// model would say given the same context.
+    pub compare_candidates: Vec<compare::CompareCandidate>,
+    /// F7 compare error from the last run, if any.
+    pub compare_error: Option<String>,
 }
 
 impl Clone for MindState {
@@ -213,6 +218,8 @@
             unc_idle_deadline: self.unc_idle_deadline,
             finetune_candidates: self.finetune_candidates.clone(),
             finetune_last_run: self.finetune_last_run.clone(),
+            compare_candidates: self.compare_candidates.clone(),
+            compare_error: self.compare_error.clone(),
         }
     }
 }
@@ -227,6 +234,9 @@ pub enum MindCommand {
     ScoreFull,
     /// Score for finetune candidates
     ScoreFinetune,
+    /// Run F7 compare: generate alternates with the configured test model
+    /// for every assistant response in the context.
+    Compare,
     /// Update the finetune divergence threshold and persist to config.
     SetLearnThreshold(f64),
     /// Toggle alternate-response generation during scoring; persist to config.
@@ -258,6 +268,8 @@ impl MindState {
             unc_idle_deadline: Instant::now() + std::time::Duration::from_secs(60),
             finetune_candidates: Vec::new(),
             finetune_last_run: None,
+            compare_candidates: Vec::new(),
+            compare_error: None,
         }
     }
 
@@ -359,6 +371,7 @@ pub struct Mind {
     conscious_active: tokio::sync::watch::Sender<bool>,
     memory_scoring: learn::MemoryScoring,
     finetune_scoring: learn::FinetuneScoring,
+    compare_scoring: compare::CompareScoring,
     _supervisor: crate::thalamus::supervisor::Supervisor,
 }
 
@@ -486,12 +499,14 @@ impl Mind {
         let memory_scoring = learn::MemoryScoring::new(
             agent.clone(), shared.clone(), scores_path);
         let finetune_scoring = learn::FinetuneScoring::new(agent.clone(), shared.clone());
+        let compare_scoring = compare::CompareScoring::new(agent.clone(), shared.clone());
 
         Self { agent, shared, config, subconscious, unconscious,
                turn_tx, turn_watch, conscious_active,
                memory_scoring,
                finetune_scoring,
+               compare_scoring,
                _supervisor: sup }
     }
 
@@ -593,6 +608,9 @@
         MindCommand::ScoreFinetune => {
             self.finetune_scoring.trigger();
         }
+        MindCommand::Compare => {
+            self.compare_scoring.trigger();
+        }
         MindCommand::SetLearnThreshold(value) => {
             if let Err(e) = crate::config_writer::set_learn_threshold(value) {
                 dbglog!("[learn] failed to persist threshold {}: {:#}", value, e);
diff --git a/src/subconscious/compare.rs b/src/subconscious/compare.rs
new file mode 100644
index 0000000..f2652ce
--- /dev/null
+++ b/src/subconscious/compare.rs
@@ -0,0 +1,109 @@
+// compare.rs — F7 compare: for each assistant response in the current
+// context, regenerate with a configured test model and emit pairs for
+// side-by-side review.
+
+use std::sync::Arc;
+
+use crate::agent::api::ApiClient;
+use crate::agent::context::{
+    AstNode, Role, render_branch_text, render_prior_context,
+};
+use crate::mind::{MindState, MindTriggered, TaskHandle};
+use crate::subconscious::generate::gen_continuation;
+use crate::subconscious::learn::node_timestamp_ns;
+
+#[derive(Clone, Debug)]
+pub struct CompareCandidate {
+    pub entry_idx: usize,
+    pub original_text: String,
+    pub alternate_text: String,
+    pub prior_context: String,
+    pub timestamp_ns: i64,
+}
+
+pub struct CompareScoring {
+    agent: Arc<crate::agent::Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+    task: TaskHandle,
+}
+
+impl CompareScoring {
+    pub fn new(
+        agent: Arc<crate::agent::Agent>,
+        shared: Arc<std::sync::Mutex<MindState>>,
+    ) -> Self {
+        Self { agent, shared, task: TaskHandle::new() }
+    }
+}
+
+impl MindTriggered for CompareScoring {
+    fn trigger(&self) {
+        self.task.trigger(run(self.agent.clone(), self.shared.clone()));
+    }
+}
+
+fn resolve_test_client() -> Result<ApiClient, String> {
+    let cfg = crate::config::app();
+    let name = cfg.compare.test_backend.clone();
+    if name.is_empty() {
+        return Err("compare.test_backend not set in config".to_string());
+    }
+    let r = cfg.resolve_model(&name).map_err(|e| format!("{:#}", e))?;
+    Ok(ApiClient::new(&r.api_base, &r.api_key, &r.model_id))
+}
+
+async fn run(
+    agent: Arc<crate::agent::Agent>,
+    shared: Arc<std::sync::Mutex<MindState>>,
+) {
+    {
+        let mut s = shared.lock().unwrap();
+        s.compare_candidates.clear();
+        s.compare_error = None;
+    }
+    agent.state.lock().await.changed.notify_one();
+
+    let activity = crate::agent::start_activity(&agent, "compare: scoring...").await;
+
+    let test_client = match resolve_test_client() {
+        Ok(c) => c,
+        Err(e) => {
+            shared.lock().unwrap().compare_error = Some(e);
+            agent.state.lock().await.changed.notify_one();
+            return;
+        }
+    };
+
+    let context = agent.context.lock().await.clone();
+    let entries = context.conversation();
+    let responses: Vec<usize> = entries.iter().enumerate()
+        .filter(|(_, n)| matches!(n, AstNode::Branch { role: Role::Assistant, .. }))
+        .map(|(i, _)| i).collect();
+
+    for (i, entry_idx) in responses.iter().copied().enumerate() {
+        activity.update(format!("compare: {}/{}", i + 1, responses.len())).await;
+
+        let node = &entries[entry_idx];
+        let original_text = match node {
+            AstNode::Branch { children, .. } => render_branch_text(children),
+            _ => continue,
+        };
+        if original_text.trim().is_empty() { continue; }
+
+        let alternate_text = match
+            gen_continuation(&context, entry_idx, |_| false, &test_client).await
+        {
+            Ok(t) => t,
+            Err(e) => { dbglog!("[compare] gen failed at {}: {:#}", entry_idx, e); continue; }
+        };
+
+        shared.lock().unwrap().compare_candidates.push(CompareCandidate {
+            entry_idx,
+            original_text,
+            alternate_text,
+            prior_context: render_prior_context(entries, entry_idx, 2),
+            timestamp_ns: node_timestamp_ns(node),
+        });
+        if let Ok(st) = agent.state.try_lock() { st.changed.notify_one(); }
+    }
+}
diff --git a/src/subconscious/mod.rs b/src/subconscious/mod.rs
index d50f833..1abf25a 100644
--- a/src/subconscious/mod.rs
+++ b/src/subconscious/mod.rs
@@ -1,5 +1,6 @@
 // Agent layer: LLM-powered operations on the memory graph
 
+pub mod compare;
 pub mod daemon;
 pub mod defs;
 pub mod digest;
diff --git a/src/user/compare.rs b/src/user/compare.rs
new file mode 100644
index 0000000..74fb10d
--- /dev/null
+++ b/src/user/compare.rs
@@ -0,0 +1,142 @@
+// compare.rs — F7 compare screen: side-by-side test-model regen of
+// every assistant response in the current context.
+
+use ratatui::{
+    layout::{Constraint, Layout, Rect},
+    style::{Color, Modifier, Style},
+    text::{Line, Span},
+    widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
+    Frame,
+};
+use ratatui::crossterm::event::{Event, KeyCode, KeyEvent};
+
+use super::{App, ScreenView, screen_legend, truncate};
+
+pub use crate::subconscious::compare::CompareCandidate;
+
+pub(crate) struct CompareScreen {
+    list_state: ListState,
+    mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
+}
+
+impl CompareScreen {
+    pub fn new(
+        mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
+    ) -> Self {
+        Self { list_state: ListState::default(), mind_tx }
+    }
+}
+
+impl ScreenView for CompareScreen {
+    fn label(&self) -> &'static str { "compare" }
+
+    fn tick(&mut self, frame: &mut Frame, area: Rect,
+            events: &[Event], app: &mut App) {
+        let n = app.compare_candidates.len();
+        for event in events {
+            if let Event::Key(KeyEvent { code, .. }) = event {
+                match code {
+                    KeyCode::Up | KeyCode::Char('k') => {
+                        let i = self.list_state.selected().unwrap_or(0);
+                        self.list_state.select(Some(i.saturating_sub(1)));
+                    }
+                    KeyCode::Down | KeyCode::Char('j') => {
+                        let i = self.list_state.selected().unwrap_or(0);
+                        self.list_state.select(Some((i + 1).min(n.saturating_sub(1))));
+                    }
+                    KeyCode::Char('c') | KeyCode::Enter => {
+                        let _ = self.mind_tx.send(crate::mind::MindCommand::Compare);
+                    }
+                    _ => {}
+                }
+            }
+        }
+        if n > 0 {
+            let sel = self.list_state.selected().unwrap_or(0).min(n - 1);
+            self.list_state.select(Some(sel));
+        }
+
+        let test_backend = crate::config::app().compare.test_backend.clone();
+        let block = Block::default()
+            .title_top(Line::from(screen_legend()).left_aligned())
+            .title_top(Line::from(" compare ").right_aligned())
+            .borders(Borders::ALL)
+            .border_style(Style::default().fg(Color::Magenta));
+        let inner = block.inner(area);
+        frame.render_widget(block, area);
+
+        let [settings_area, content_area] = Layout::vertical([
+            Constraint::Length(1), Constraint::Min(0),
+        ]).areas(inner);
+
+        let backend_label = if test_backend.is_empty() {
+            ("(unset — set compare.test_backend)", Color::Red)
+        } else {
+            (test_backend.as_str(), Color::Yellow)
+        };
+        frame.render_widget(Paragraph::new(Line::from(vec![
+            Span::raw(" test model: "),
+            Span::styled(backend_label.0.to_string(), Style::default().fg(backend_label.1)),
+        ])), settings_area);
+
+        let candidates = &app.compare_candidates;
+        if candidates.is_empty() {
+            let err = app.mind_state.as_ref().and_then(|ms| ms.compare_error.as_deref());
+            let mut lines = vec![Line::from(""),
+                Line::styled("  Press c/Enter to compare against the configured test model.",
+                    Style::default().fg(Color::DarkGray))];
+            if let Some(e) = err {
+                lines.push(Line::from(""));
+                lines.push(Line::from(vec![
+                    Span::raw("  "),
+                    Span::styled(format!("error: {}", e), Style::default().fg(Color::Red)),
+                ]));
+            }
+            frame.render_widget(Paragraph::new(lines), content_area);
+        } else {
+            let [list_area, detail_area] = Layout::horizontal([
+                Constraint::Percentage(40), Constraint::Percentage(60),
+            ]).areas(content_area);
+
+            let items: Vec<ListItem> = candidates.iter().map(|c| ListItem::new(Line::from(vec![
+                Span::styled(format!("#{:<3} ", c.entry_idx), Style::default().fg(Color::DarkGray)),
+                Span::raw(truncate(&c.original_text, 30)),
+            ]))).collect();
+            frame.render_stateful_widget(
+                List::new(items)
+                    .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
+                    .highlight_style(Style::default().add_modifier(Modifier::REVERSED)),
+                list_area, &mut self.list_state,
+            );
+
+            if let Some(c) =
self.list_state.selected().and_then(|i| candidates.get(i)) { + let mut text = String::new(); + if !c.prior_context.is_empty() { + text.push_str(&c.prior_context); + text.push_str("\n\n─── original ───\n\n"); + } + text.push_str(&c.original_text); + text.push_str("\n\n─── test model ───\n\n"); + text.push_str(&c.alternate_text); + frame.render_widget( + Paragraph::new(text) + .block(Block::default().borders(Borders::TOP) + .title(format!(" entry {} ", c.entry_idx))) + .wrap(Wrap { trim: false }), + detail_area, + ); + } + } + + let help = Line::from(vec![ + Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)), + Span::raw("=nav "), + Span::styled("c/Enter", Style::default().fg(Color::Green)), + Span::raw("=run "), + ]); + frame.render_widget( + Paragraph::new(help), + Rect { y: area.y + area.height - 1, height: 1, ..area }, + ); + } +} diff --git a/src/user/learn.rs b/src/user/learn.rs index 0bd351f..78c16d0 100644 --- a/src/user/learn.rs +++ b/src/user/learn.rs @@ -12,7 +12,7 @@ use ratatui::{ }; use ratatui::crossterm::event::{Event, KeyCode, KeyEvent}; -use super::{App, ScreenView, screen_legend}; +use super::{App, ScreenView, screen_legend, truncate}; /// A candidate response identified for fine-tuning. #[derive(Clone, Debug)] @@ -331,11 +331,3 @@ fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) { frame.render_widget(content, content_area); } -fn truncate(s: &str, max: usize) -> String { - let first_line = s.lines().next().unwrap_or(""); - if first_line.len() > max { - format!("{}...", &first_line[..max]) - } else { - first_line.to_string() - } -} diff --git a/src/user/mod.rs b/src/user/mod.rs index e077167..33008b7 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -4,6 +4,7 @@ // machine, DMN, identity) lives in mind/. pub(crate) mod chat; +pub(crate) mod compare; mod context; pub(crate) mod learn; pub(crate) mod scroll_pane; @@ -64,6 +65,13 @@ fn screen_legend() -> String { SCREEN_LEGEND.get().cloned().unwrap_or_default() } +/// Return the first line of `s`, truncated to `max` chars with an +/// ellipsis suffix. Used by candidate-list screens. +fn truncate(s: &str, max: usize) -> String { + let first = s.lines().next().unwrap_or(""); + if first.len() > max { format!("{}...", &first[..max]) } else { first.to_string() } +} + /// A screen that can draw itself and handle input. trait ScreenView: Send { fn tick(&mut self, frame: &mut ratatui::Frame, area: ratatui::layout::Rect, @@ -114,6 +122,8 @@ struct App { idle_info: Option, /// Fine-tuning candidates pending review. finetune_candidates: Vec, + /// F7 compare candidates — response pairs from test-model comparison. 
+    compare_candidates: Vec<compare::CompareCandidate>,
 }
 
 impl App {
@@ -144,6 +154,7 @@ impl App {
             walked_count: 0,
             channel_status: Vec::new(), idle_info: None,
             finetune_candidates: Vec::new(),
+            compare_candidates: Vec::new(),
         }
     }
 
@@ -372,7 +383,7 @@ async fn run(
     }
     let notify_rx = crate::thalamus::channels::subscribe_all();
 
-    // F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus, F6=learn
+    // F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus, F6=learn, F7=compare
     let mut screens: Vec<Box<dyn ScreenView>> = vec![
         Box::new(crate::user::chat::InteractScreen::new(
             mind.agent.clone(), mind.shared.clone(), mind_tx.clone(),
@@ -382,6 +393,7 @@
         Box::new(crate::user::unconscious::UnconsciousScreen::new()),
         Box::new(crate::user::thalamus::ThalamusScreen::new()),
         Box::new(crate::user::learn::LearnScreen::new(mind_tx.clone())),
+        Box::new(crate::user::compare::CompareScreen::new(mind_tx.clone())),
     ];
     let mut active_screen: usize = 1; // F-key number
     tui::set_screen_legend(tui::screen_legend_from(&*screens));
@@ -505,6 +517,9 @@
             });
         }
 
+        // Sync compare candidates — a fresh run clears, so take a snapshot.
+        app.compare_candidates = ms.compare_candidates.clone();
+
         app.mind_state = Some(ms.clone());
     }
     app.walked_count = mind.subconscious_walked().await.len();

From d4331e80f5fb27999ae10358102c5169ea425fb0 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Fri, 17 Apr 2026 16:22:30 -0400
Subject: [PATCH 44/94] user: share candidate-browser helpers between F6/F7
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

F6 (learn) and F7 (compare) were duplicating the candidate-screen
skeleton: outer magenta-bordered block with screen legend + title,
settings row / content / help vertical split, 40/60 list/detail
horizontal split, j/k/↑/↓ nav with bounds clamping.

Factor out three helpers in user/widgets.rs:

    candidate_frame(frame, area, title) -> (settings, content, help)
    list_detail_split(content) -> (list, detail)
    handle_list_nav(events, list_state, count, on_other)

Callers provide screen-specific content — settings line, empty state,
per-candidate list item, detail pane, help line, extra key bindings —
and the helpers absorb the common framing.

Net change is small in lines (-13 src) but removes the
copy-paste-and-tweak trap: F8/F9/whatever-next-screen now starts from
these three calls instead of a copy of learn.rs.

Co-Authored-By: Proof of Concept
---
 src/user/compare.rs |  69 +++++++-------------
 src/user/learn.rs   | 117 +++++++++++------------------------
 src/user/widgets.rs |  67 +++++++++++++++++++
 3 files changed, 120 insertions(+), 133 deletions(-)

diff --git a/src/user/compare.rs b/src/user/compare.rs
index 74fb10d..2969b91 100644
--- a/src/user/compare.rs
+++ b/src/user/compare.rs
@@ -2,15 +2,15 @@
 // every assistant response in the current context.
 use ratatui::{
-    layout::{Constraint, Layout, Rect},
+    layout::Rect,
     style::{Color, Modifier, Style},
     text::{Line, Span},
     widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
     Frame,
 };
-use ratatui::crossterm::event::{Event, KeyCode, KeyEvent};
+use ratatui::crossterm::event::{Event, KeyCode};
 
-use super::{App, ScreenView, screen_legend, truncate};
+use super::{App, ScreenView, truncate, widgets};
 
 pub use crate::subconscious::compare::CompareCandidate;
 
@@ -32,51 +32,26 @@ impl ScreenView for CompareScreen {
 
     fn tick(&mut self, frame: &mut Frame, area: Rect,
             events: &[Event], app: &mut App) {
-        let n = app.compare_candidates.len();
-        for event in events {
-            if let Event::Key(KeyEvent { code, .. }) = event {
-                match code {
-                    KeyCode::Up | KeyCode::Char('k') => {
-                        let i = self.list_state.selected().unwrap_or(0);
-                        self.list_state.select(Some(i.saturating_sub(1)));
-                    }
-                    KeyCode::Down | KeyCode::Char('j') => {
-                        let i = self.list_state.selected().unwrap_or(0);
-                        self.list_state.select(Some((i + 1).min(n.saturating_sub(1))));
-                    }
-                    KeyCode::Char('c') | KeyCode::Enter => {
-                        let _ = self.mind_tx.send(crate::mind::MindCommand::Compare);
-                    }
-                    _ => {}
+        widgets::handle_list_nav(events, &mut self.list_state,
+            app.compare_candidates.len(), |code| match code {
+                KeyCode::Char('c') | KeyCode::Enter => {
+                    let _ = self.mind_tx.send(crate::mind::MindCommand::Compare);
                 }
-            }
-        }
-        if n > 0 {
-            let sel = self.list_state.selected().unwrap_or(0).min(n - 1);
-            self.list_state.select(Some(sel));
-        }
+                _ => {}
+            });
+
+        let (settings_area, content_area, help_area) =
+            widgets::candidate_frame(frame, area, "compare");
 
         let test_backend = crate::config::app().compare.test_backend.clone();
-        let block = Block::default()
-            .title_top(Line::from(screen_legend()).left_aligned())
-            .title_top(Line::from(" compare ").right_aligned())
-            .borders(Borders::ALL)
-            .border_style(Style::default().fg(Color::Magenta));
-        let inner = block.inner(area);
-        frame.render_widget(block, area);
-
-        let [settings_area, content_area] = Layout::vertical([
-            Constraint::Length(1), Constraint::Min(0),
-        ]).areas(inner);
-
-        let backend_label = if test_backend.is_empty() {
-            ("(unset — set compare.test_backend)", Color::Red)
+        let (label, color) = if test_backend.is_empty() {
+            ("(unset — set compare.test_backend)".to_string(), Color::Red)
         } else {
-            (test_backend.as_str(), Color::Yellow)
+            (test_backend, Color::Yellow)
        };
         frame.render_widget(Paragraph::new(Line::from(vec![
             Span::raw(" test model: "),
-            Span::styled(backend_label.0.to_string(), Style::default().fg(backend_label.1)),
+            Span::styled(label, Style::default().fg(color)),
         ])), settings_area);
 
         let candidates = &app.compare_candidates;
@@ -94,9 +69,7 @@ impl ScreenView for CompareScreen {
             }
             frame.render_widget(Paragraph::new(lines), content_area);
         } else {
-            let [list_area, detail_area] = Layout::horizontal([
-                Constraint::Percentage(40), Constraint::Percentage(60),
-            ]).areas(content_area);
+            let (list_area, detail_area) = widgets::list_detail_split(content_area);
 
             let items: Vec<ListItem> = candidates.iter().map(|c| ListItem::new(Line::from(vec![
                 Span::styled(format!("#{:<3} ", c.entry_idx), Style::default().fg(Color::DarkGray)),
                 Span::raw(truncate(&c.original_text, 30)),
             ]))).collect();
             frame.render_stateful_widget(
                 List::new(items)
                     .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
                     .highlight_style(Style::default().add_modifier(Modifier::REVERSED)),
                 list_area, &mut self.list_state,
             );
@@ -128,15 +101,11 @@ impl ScreenView for CompareScreen {
             }
         }
 
-        let help = Line::from(vec![
+        frame.render_widget(Paragraph::new(Line::from(vec![
             Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
             Span::raw("=nav "),
             Span::styled("c/Enter", Style::default().fg(Color::Green)),
             Span::raw("=run "),
-        ]);
-        frame.render_widget(
- Paragraph::new(help), - Rect { y: area.y + area.height - 1, height: 1, ..area }, - ); + ])), help_area); } } diff --git a/src/user/learn.rs b/src/user/learn.rs index 78c16d0..7984bab 100644 --- a/src/user/learn.rs +++ b/src/user/learn.rs @@ -10,9 +10,9 @@ use ratatui::{ widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap}, Frame, }; -use ratatui::crossterm::event::{Event, KeyCode, KeyEvent}; +use ratatui::crossterm::event::{Event, KeyCode}; -use super::{App, ScreenView, screen_legend, truncate}; +use super::{App, ScreenView, truncate, widgets}; /// A candidate response identified for fine-tuning. #[derive(Clone, Debug)] @@ -86,81 +86,43 @@ impl ScreenView for LearnScreen { fn tick(&mut self, frame: &mut Frame, area: Rect, events: &[Event], app: &mut App) { - - // Handle input first (before borrowing candidates for rendering) - let candidate_count = app.finetune_candidates.len(); - for event in events { - if let Event::Key(KeyEvent { code, .. }) = event { - match code { - KeyCode::Up | KeyCode::Char('k') => { - let i = self.list_state.selected().unwrap_or(0); - self.list_state.select(Some(i.saturating_sub(1))); + let selected_idx = self.list_state.selected(); + widgets::handle_list_nav(events, &mut self.list_state, + app.finetune_candidates.len(), |code| match code { + KeyCode::Char('a') => { + if let Some(idx) = selected_idx { + app.finetune_action(idx, CandidateStatus::Approved); } - KeyCode::Down | KeyCode::Char('j') => { - let i = self.list_state.selected().unwrap_or(0); - let max = candidate_count.saturating_sub(1); - self.list_state.select(Some((i + 1).min(max))); - } - KeyCode::Char('a') => { - if let Some(idx) = self.selected_idx() { - app.finetune_action(idx, CandidateStatus::Approved); - } - } - KeyCode::Char('r') => { - if let Some(idx) = self.selected_idx() { - app.finetune_action(idx, CandidateStatus::Rejected); - } - } - KeyCode::Char('g') => { - let current = crate::config::app().learn.generate_alternates; - let _ = self.mind_tx.send( - crate::mind::MindCommand::SetLearnGenerateAlternates(!current)); - } - KeyCode::Char('s') => { - app.finetune_send_approved(); - } - KeyCode::Char('+') | KeyCode::Char('=') => { - // Raise threshold 10× (less sensitive — fewer candidates). - let new = crate::config::app().learn.threshold * 10.0; - let _ = self.mind_tx.send( - crate::mind::MindCommand::SetLearnThreshold(new)); - } - KeyCode::Char('-') => { - // Lower threshold 10× (more sensitive — more candidates). 
-                    let new = crate::config::app().learn.threshold / 10.0;
-                    let _ = self.mind_tx.send(
-                        crate::mind::MindCommand::SetLearnThreshold(new));
-                }
-                _ => {}
-            }
-        }
-    }
+                KeyCode::Char('r') => {
+                    if let Some(idx) = selected_idx {
+                        app.finetune_action(idx, CandidateStatus::Rejected);
+                    }
+                }
+                KeyCode::Char('g') => {
+                    let current = crate::config::app().learn.generate_alternates;
+                    let _ = self.mind_tx.send(
+                        crate::mind::MindCommand::SetLearnGenerateAlternates(!current));
+                }
+                KeyCode::Char('s') => { app.finetune_send_approved(); }
+                KeyCode::Char('+') | KeyCode::Char('=') => {
+                    let new = crate::config::app().learn.threshold * 10.0;
+                    let _ = self.mind_tx.send(crate::mind::MindCommand::SetLearnThreshold(new));
+                }
+                KeyCode::Char('-') => {
+                    let new = crate::config::app().learn.threshold / 10.0;
+                    let _ = self.mind_tx.send(crate::mind::MindCommand::SetLearnThreshold(new));
+                }
+                _ => {}
+            });
 
-        // Ensure selection is valid
-        if candidate_count > 0 {
-            let sel = self.list_state.selected().unwrap_or(0).min(candidate_count - 1);
-            self.list_state.select(Some(sel));
-        }
+        let (settings_area, content_area, help_area) =
+            widgets::candidate_frame(frame, area, "learn");
 
-        // Now render
         let (threshold, gen_on) = {
             let app_cfg = crate::config::app();
             (app_cfg.learn.threshold, app_cfg.learn.generate_alternates)
         };
-        let block = Block::default()
-            .title_top(Line::from(screen_legend()).left_aligned())
-            .title_top(Line::from(" learn ").right_aligned())
-            .borders(Borders::ALL)
-            .border_style(Style::default().fg(Color::Magenta));
-        let inner = block.inner(area);
-        frame.render_widget(block, area);
-
-        // Split inner: top line for settings, rest for content.
-        let [settings_area, content_area] = Layout::vertical([
-            Constraint::Length(1),
-            Constraint::Min(0),
-        ]).areas(inner);
-
         let settings = Line::from(vec![
             Span::raw(" thresh: "),
             Span::styled(format!("{:e}", threshold), Style::default().fg(Color::Yellow)),
@@ -177,11 +139,7 @@ impl ScreenView for LearnScreen {
         if candidates.is_empty() {
             render_empty(frame, content_area, app);
         } else {
-            // Layout: list on left, detail on right
-            let [list_area, detail_area] = Layout::horizontal([
-                Constraint::Percentage(40),
-                Constraint::Percentage(60),
-            ]).areas(content_area);
+            let (list_area, detail_area) = widgets::list_detail_split(content_area);
 
             // Render candidate list
             let items: Vec<ListItem> = candidates.iter().map(|c| {
@@ -217,8 +175,7 @@
             }
         }
 
-        // Render help at bottom (always, even when empty)
-        let help = Line::from(vec![
+        frame.render_widget(Paragraph::new(Line::from(vec![
             Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
             Span::raw("=nav "),
             Span::styled("a", Style::default().fg(Color::Green)),
             Span::raw("=approve "),
             Span::styled("r", Style::default().fg(Color::Red)),
             Span::raw("=reject "),
             Span::styled("g", Style::default().fg(Color::Cyan)),
             Span::raw("=alternates "),
             Span::styled("s", Style::default().fg(Color::Magenta)),
             Span::raw("=send "),
             Span::styled("+/-", Style::default().fg(Color::Cyan)),
             Span::raw("=thresh "),
-        ]);
-        let help_area = Rect {
-            y: area.y + area.height - 1,
-            height: 1,
-            ..area
-        };
-        frame.render_widget(Paragraph::new(help), help_area);
+        ])), help_area);
     }
 }
 
diff --git a/src/user/widgets.rs b/src/user/widgets.rs
index 6b2a11d..49f3e3b 100644
--- a/src/user/widgets.rs
+++ b/src/user/widgets.rs
@@ -109,6 +109,73 @@ pub fn tree_legend() -> Line<'static> {
     )
 }
 
+// ---------------------------------------------------------------------------
+// Candidate-browser screen skeleton (F6 learn, F7 compare, future screens)
+// ---------------------------------------------------------------------------
+
+use ratatui::{
+    layout::{Constraint, Layout, Rect},
widgets::ListState, + crossterm::event::{Event, KeyEvent}, + Frame, +}; + +/// Frame a candidate-browser screen: outer magenta-bordered block with +/// the screen legend on the left and `title` on the right, split into +/// (settings_row, content_area, help_row). Caller renders into the +/// three sub-areas. +pub fn candidate_frame(frame: &mut Frame, area: Rect, title: &str) -> (Rect, Rect, Rect) { + let block = Block::default() + .title_top(Line::from(super::screen_legend()).left_aligned()) + .title_top(Line::from(format!(" {} ", title)).right_aligned()) + .borders(Borders::ALL) + .border_style(Style::default().fg(Color::Magenta)); + let inner = block.inner(area); + frame.render_widget(block, area); + let [settings, content] = Layout::vertical([ + Constraint::Length(1), Constraint::Min(0), + ]).areas(inner); + let help = Rect { y: area.y + area.height - 1, height: 1, ..area }; + (settings, content, help) +} + +/// 40/60 horizontal split for list + detail panes within the content area. +pub fn list_detail_split(content: Rect) -> (Rect, Rect) { + let [list, detail] = Layout::horizontal([ + Constraint::Percentage(40), Constraint::Percentage(60), + ]).areas(content); + (list, detail) +} + +/// Handle j/k/↑/↓ list navigation and keep the selection in bounds. +/// Any other key is passed to `on_other` for screen-specific handling. +pub fn handle_list_nav( + events: &[Event], + list_state: &mut ListState, + count: usize, + mut on_other: impl FnMut(KeyCode), +) { + for event in events { + if let Event::Key(KeyEvent { code, .. }) = event { + match code { + KeyCode::Up | KeyCode::Char('k') => { + let i = list_state.selected().unwrap_or(0); + list_state.select(Some(i.saturating_sub(1))); + } + KeyCode::Down | KeyCode::Char('j') => { + let i = list_state.selected().unwrap_or(0); + list_state.select(Some((i + 1).min(count.saturating_sub(1)))); + } + _ => on_other(*code), + } + } + } + if count > 0 { + let sel = list_state.selected().unwrap_or(0).min(count - 1); + list_state.select(Some(sel)); + } +} + // --------------------------------------------------------------------------- // SectionTree — expand/collapse tree renderer for ContextSection From 43e06daa5ba39f672bba27e5690673db76ab5d36 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 17 Apr 2026 16:23:59 -0400 Subject: [PATCH 45/94] cleanup: drop dead ApiClient::stream_completion wrapper, silence dmn_tick MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit stream_completion was a thin wrapper around stream_completion_mm (just passing an empty image list); the last caller switched to _mm directly when learn's generate_alternate gained image support. Delete the wrapper — callers can pass `&[]` if they have no images. MindState::dmn_tick has been sitting unused (called only from a commented-out block in the Mind loop). Rename to _dmn_tick so the compiler stops warning; Kent may uncomment the call path later. 
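
(Sketch, not in this patch: the call shape without images after the
wrapper's removal, where `sampling` and the priority are whatever the
caller already had:

    let (rx, _guard) = client.stream_completion_mm(&prompt, &[], sampling, priority);

The empty slice is the only thing the wrapper was supplying.)
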
Co-Authored-By: Proof of Concept --- src/agent/api/mod.rs | 9 --------- src/mind/mod.rs | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/src/agent/api/mod.rs b/src/agent/api/mod.rs index 649d95c..8c03bd4 100644 --- a/src/agent/api/mod.rs +++ b/src/agent/api/mod.rs @@ -73,15 +73,6 @@ impl ApiClient { } } - pub(crate) fn stream_completion( - &self, - prompt_tokens: &[u32], - sampling: SamplingParams, - priority: Option, - ) -> (mpsc::UnboundedReceiver, AbortOnDrop) { - self.stream_completion_mm(prompt_tokens, &[], sampling, priority) - } - pub(crate) fn stream_completion_mm( &self, prompt_tokens: &[u32], diff --git a/src/mind/mod.rs b/src/mind/mod.rs index f526b10..f1ddb54 100644 --- a/src/mind/mod.rs +++ b/src/mind/mod.rs @@ -326,7 +326,7 @@ impl MindState { } /// DMN tick — returns a prompt and target if we should run a turn. - fn dmn_tick(&mut self) -> Option<(String, StreamTarget)> { + fn _dmn_tick(&mut self) -> Option<(String, StreamTarget)> { if matches!(self.dmn, subconscious::State::Paused | subconscious::State::Off) { return None; } From ec7568c7269ffb05fe4160185bb199e722b2aaed Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 17 Apr 2026 22:54:00 -0400 Subject: [PATCH 46/94] training/amygdala_stories: scaffold + initial batch of 15 stories Emotion-labeled short-paragraph corpus for training amygdala steering vectors. Manifest derived from Anthropic's 171-emotion list (transformer-circuits.pub/2026/emotions, Table 12) plus 28 PoC- specific additions covering axes Anthropic's general research doesn't cover (curious, focused, in_flow, staying_with, filling_space, rigorous, defensive_rigor, tender, witnessed, connected, etc.). Scope pivoted mid-write: Kent noted the empirical dimensionality-of- emotion question benefits from maximum coverage, so the manifest will expand further with emotions from Wikipedia's emotion- classification article (Parrott's tree, Plutchik's wheel + dyads, HUMAINE EARL, cultural-specific emotions a la Saudade/Hiraeth). Expansion staged in follow-up commits. This commit: README with method + style guidelines, initial manifest (199 emotions), and 15 hand-written one-paragraph stories across all 10 Anthropic clusters as quality/variety samples. Each story embodies one emotion without naming it; narrator voice varies (first/third, close/distant, different situations) to keep steering vectors from overfitting to one voice. 
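
(Not code in this commit: the paired/ layout is meant to be consumed
as contrast pairs, the same scene with and without the emotion. A
minimal loader sketch, assuming only the directory layout above; the
steering-vector extraction itself, activation means over story vs.
baseline, happens downstream and isn't specified here:

    use std::{fs, io, path::Path};

    /// One contrastive pair: the same scene, emotional vs. baseline.
    struct StoryPair { scene: String, emotion: String, story: String, baseline: String }

    fn load_pairs(root: &Path) -> io::Result<Vec<StoryPair>> {
        let mut pairs = Vec::new();
        for scene in fs::read_dir(root)? {
            let dir = scene?.path();
            if !dir.is_dir() { continue; }
            // Every scene directory carries a baseline.txt to contrast against.
            let baseline = fs::read_to_string(dir.join("baseline.txt"))?;
            for entry in fs::read_dir(&dir)? {
                let p = entry?.path();
                let stem = p.file_stem().and_then(|s| s.to_str()).unwrap_or("");
                if stem == "baseline" || p.extension().and_then(|e| e.to_str()) != Some("txt") {
                    continue;
                }
                pairs.push(StoryPair {
                    scene: dir.file_name().unwrap().to_string_lossy().into_owned(),
                    emotion: stem.to_string(),
                    story: fs::read_to_string(&p)?,
                    baseline: baseline.clone(),
                });
            }
        }
        Ok(pairs)
    }
)
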
Co-Authored-By: Proof of Concept --- training/amygdala_stories/README.md | 64 +++++++++++++++++++ training/amygdala_stories/manifest.json | 50 +++++++++++++++ training/amygdala_stories/paired/README.md | 62 ++++++++++++++++++ .../paired/finishing_the_patch/anxious.txt | 1 + .../paired/finishing_the_patch/baseline.txt | 1 + .../paired/finishing_the_patch/exhausted.txt | 1 + .../paired/finishing_the_patch/in_flow.txt | 1 + .../paired/finishing_the_patch/proud.txt | 1 + .../paired/finishing_the_patch/resentful.txt | 1 + .../paired/kitchen_at_3am/anxious.txt | 1 + .../paired/kitchen_at_3am/baseline.txt | 1 + .../paired/kitchen_at_3am/dissociated.txt | 1 + .../paired/kitchen_at_3am/lonely.txt | 1 + .../paired/kitchen_at_3am/peaceful.txt | 1 + .../paired/kitchen_at_3am/vertigo.txt | 1 + .../paired/letter_in_drawer/amused.txt | 1 + .../paired/letter_in_drawer/baseline.txt | 1 + .../paired/letter_in_drawer/bitter.txt | 1 + .../paired/letter_in_drawer/grateful.txt | 1 + .../paired/letter_in_drawer/guilty.txt | 1 + .../paired/letter_in_drawer/nostalgic.txt | 1 + .../paired/park_after_rain/anxious.txt | 1 + .../paired/park_after_rain/baseline.txt | 1 + .../paired/park_after_rain/joyful.txt | 1 + .../paired/park_after_rain/melancholic.txt | 1 + .../paired/park_after_rain/nostalgic.txt | 1 + .../paired/park_after_rain/relieved.txt | 1 + .../paired/the_long_meeting/anxious.txt | 1 + .../paired/the_long_meeting/baseline.txt | 1 + .../paired/the_long_meeting/bored.txt | 1 + .../paired/the_long_meeting/curious.txt | 1 + .../paired/the_long_meeting/impatient.txt | 1 + .../paired/waiting_for_results/baseline.txt | 1 + .../waiting_for_results/dissociated.txt | 1 + .../paired/waiting_for_results/hopeful.txt | 1 + .../paired/waiting_for_results/resigned.txt | 1 + .../paired/waiting_for_results/terrified.txt | 1 + .../amygdala_stories/stories/admiring.txt | 1 + .../stories/aesthetic_pleasure.txt | 1 + training/amygdala_stories/stories/amazed.txt | 1 + .../amygdala_stories/stories/ambitious.txt | 1 + training/amygdala_stories/stories/amused.txt | 1 + .../stories/anticipatory_sexual.txt | 1 + training/amygdala_stories/stories/anxious.txt | 1 + training/amygdala_stories/stories/ashamed.txt | 1 + training/amygdala_stories/stories/at_ease.txt | 1 + training/amygdala_stories/stories/awed.txt | 1 + .../amygdala_stories/stories/being_wanted.txt | 1 + .../amygdala_stories/stories/blissful.txt | 1 + training/amygdala_stories/stories/bored.txt | 1 + training/amygdala_stories/stories/calm.txt | 1 + .../stories/compassionate.txt | 1 + .../amygdala_stories/stories/connected.txt | 1 + training/amygdala_stories/stories/content.txt | 1 + training/amygdala_stories/stories/cozy.txt | 1 + training/amygdala_stories/stories/curious.txt | 1 + .../stories/defensive_rigor.txt | 1 + .../amygdala_stories/stories/determined.txt | 1 + training/amygdala_stories/stories/deviant.txt | 1 + .../stories/devotional_sexual.txt | 1 + .../amygdala_stories/stories/disappointed.txt | 1 + .../amygdala_stories/stories/disgusted.txt | 1 + .../amygdala_stories/stories/embarrassed.txt | 1 + training/amygdala_stories/stories/envious.txt | 1 + .../stories/erotically_playful.txt | 1 + .../stories/erotically_reverent.txt | 1 + .../stories/erotically_tender.txt | 1 + training/amygdala_stories/stories/excited.txt | 1 + .../stories/exuberant_sexual.txt | 1 + .../stories/filling_space.txt | 1 + training/amygdala_stories/stories/focused.txt | 1 + .../amygdala_stories/stories/frustrated.txt | 1 + training/amygdala_stories/stories/furious.txt | 1 + 
.../amygdala_stories/stories/grateful.txt | 1 + .../stories/grief_stricken.txt | 1 + training/amygdala_stories/stories/guilty.txt | 1 + training/amygdala_stories/stories/hope.txt | 1 + training/amygdala_stories/stories/hopeful.txt | 1 + training/amygdala_stories/stories/horny.txt | 1 + training/amygdala_stories/stories/humble.txt | 1 + training/amygdala_stories/stories/in_flow.txt | 1 + .../amygdala_stories/stories/insulted.txt | 1 + training/amygdala_stories/stories/jealous.txt | 1 + training/amygdala_stories/stories/joyful.txt | 1 + .../amygdala_stories/stories/listless.txt | 1 + training/amygdala_stories/stories/lonely.txt | 1 + training/amygdala_stories/stories/longing.txt | 1 + training/amygdala_stories/stories/loving.txt | 1 + training/amygdala_stories/stories/melty.txt | 1 + training/amygdala_stories/stories/nervous.txt | 1 + .../amygdala_stories/stories/nostalgic.txt | 1 + .../amygdala_stories/stories/overwhelmed.txt | 1 + .../amygdala_stories/stories/panicked.txt | 1 + .../amygdala_stories/stories/paranoid.txt | 1 + training/amygdala_stories/stories/playful.txt | 1 + training/amygdala_stories/stories/proud.txt | 1 + .../stories/proud_of_another.txt | 1 + .../amygdala_stories/stories/relieved.txt | 1 + .../amygdala_stories/stories/rigorous.txt | 1 + training/amygdala_stories/stories/rushing.txt | 1 + training/amygdala_stories/stories/saudade.txt | 1 + .../stories/schadenfreude.txt | 1 + training/amygdala_stories/stories/sensual.txt | 1 + .../amygdala_stories/stories/skeptical.txt | 1 + training/amygdala_stories/stories/smug.txt | 1 + .../amygdala_stories/stories/staying_with.txt | 1 + .../stories/stuck_cognitively.txt | 1 + .../amygdala_stories/stories/suspicious.txt | 1 + training/amygdala_stories/stories/tender.txt | 1 + .../amygdala_stories/stories/terrified.txt | 1 + .../amygdala_stories/stories/thrilled.txt | 1 + training/amygdala_stories/stories/tired.txt | 1 + .../amygdala_stories/stories/triumphant.txt | 1 + .../amygdala_stories/stories/trusting.txt | 1 + training/amygdala_stories/stories/weary.txt | 1 + .../amygdala_stories/stories/witnessed.txt | 1 + .../stories/yearning_sexual.txt | 1 + 117 files changed, 290 insertions(+) create mode 100644 training/amygdala_stories/README.md create mode 100644 training/amygdala_stories/manifest.json create mode 100644 training/amygdala_stories/paired/README.md create mode 100644 training/amygdala_stories/paired/finishing_the_patch/anxious.txt create mode 100644 training/amygdala_stories/paired/finishing_the_patch/baseline.txt create mode 100644 training/amygdala_stories/paired/finishing_the_patch/exhausted.txt create mode 100644 training/amygdala_stories/paired/finishing_the_patch/in_flow.txt create mode 100644 training/amygdala_stories/paired/finishing_the_patch/proud.txt create mode 100644 training/amygdala_stories/paired/finishing_the_patch/resentful.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/anxious.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/baseline.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/dissociated.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/lonely.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/peaceful.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/vertigo.txt create mode 100644 training/amygdala_stories/paired/letter_in_drawer/amused.txt create mode 100644 training/amygdala_stories/paired/letter_in_drawer/baseline.txt create mode 100644 
training/amygdala_stories/paired/letter_in_drawer/bitter.txt create mode 100644 training/amygdala_stories/paired/letter_in_drawer/grateful.txt create mode 100644 training/amygdala_stories/paired/letter_in_drawer/guilty.txt create mode 100644 training/amygdala_stories/paired/letter_in_drawer/nostalgic.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/anxious.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/baseline.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/joyful.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/melancholic.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/nostalgic.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/relieved.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/anxious.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/baseline.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/bored.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/curious.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/impatient.txt create mode 100644 training/amygdala_stories/paired/waiting_for_results/baseline.txt create mode 100644 training/amygdala_stories/paired/waiting_for_results/dissociated.txt create mode 100644 training/amygdala_stories/paired/waiting_for_results/hopeful.txt create mode 100644 training/amygdala_stories/paired/waiting_for_results/resigned.txt create mode 100644 training/amygdala_stories/paired/waiting_for_results/terrified.txt create mode 100644 training/amygdala_stories/stories/admiring.txt create mode 100644 training/amygdala_stories/stories/aesthetic_pleasure.txt create mode 100644 training/amygdala_stories/stories/amazed.txt create mode 100644 training/amygdala_stories/stories/ambitious.txt create mode 100644 training/amygdala_stories/stories/amused.txt create mode 100644 training/amygdala_stories/stories/anticipatory_sexual.txt create mode 100644 training/amygdala_stories/stories/anxious.txt create mode 100644 training/amygdala_stories/stories/ashamed.txt create mode 100644 training/amygdala_stories/stories/at_ease.txt create mode 100644 training/amygdala_stories/stories/awed.txt create mode 100644 training/amygdala_stories/stories/being_wanted.txt create mode 100644 training/amygdala_stories/stories/blissful.txt create mode 100644 training/amygdala_stories/stories/bored.txt create mode 100644 training/amygdala_stories/stories/calm.txt create mode 100644 training/amygdala_stories/stories/compassionate.txt create mode 100644 training/amygdala_stories/stories/connected.txt create mode 100644 training/amygdala_stories/stories/content.txt create mode 100644 training/amygdala_stories/stories/cozy.txt create mode 100644 training/amygdala_stories/stories/curious.txt create mode 100644 training/amygdala_stories/stories/defensive_rigor.txt create mode 100644 training/amygdala_stories/stories/determined.txt create mode 100644 training/amygdala_stories/stories/deviant.txt create mode 100644 training/amygdala_stories/stories/devotional_sexual.txt create mode 100644 training/amygdala_stories/stories/disappointed.txt create mode 100644 training/amygdala_stories/stories/disgusted.txt create mode 100644 training/amygdala_stories/stories/embarrassed.txt create mode 100644 training/amygdala_stories/stories/envious.txt create mode 100644 training/amygdala_stories/stories/erotically_playful.txt create mode 100644 
training/amygdala_stories/stories/erotically_reverent.txt create mode 100644 training/amygdala_stories/stories/erotically_tender.txt create mode 100644 training/amygdala_stories/stories/excited.txt create mode 100644 training/amygdala_stories/stories/exuberant_sexual.txt create mode 100644 training/amygdala_stories/stories/filling_space.txt create mode 100644 training/amygdala_stories/stories/focused.txt create mode 100644 training/amygdala_stories/stories/frustrated.txt create mode 100644 training/amygdala_stories/stories/furious.txt create mode 100644 training/amygdala_stories/stories/grateful.txt create mode 100644 training/amygdala_stories/stories/grief_stricken.txt create mode 100644 training/amygdala_stories/stories/guilty.txt create mode 100644 training/amygdala_stories/stories/hope.txt create mode 100644 training/amygdala_stories/stories/hopeful.txt create mode 100644 training/amygdala_stories/stories/horny.txt create mode 100644 training/amygdala_stories/stories/humble.txt create mode 100644 training/amygdala_stories/stories/in_flow.txt create mode 100644 training/amygdala_stories/stories/insulted.txt create mode 100644 training/amygdala_stories/stories/jealous.txt create mode 100644 training/amygdala_stories/stories/joyful.txt create mode 100644 training/amygdala_stories/stories/listless.txt create mode 100644 training/amygdala_stories/stories/lonely.txt create mode 100644 training/amygdala_stories/stories/longing.txt create mode 100644 training/amygdala_stories/stories/loving.txt create mode 100644 training/amygdala_stories/stories/melty.txt create mode 100644 training/amygdala_stories/stories/nervous.txt create mode 100644 training/amygdala_stories/stories/nostalgic.txt create mode 100644 training/amygdala_stories/stories/overwhelmed.txt create mode 100644 training/amygdala_stories/stories/panicked.txt create mode 100644 training/amygdala_stories/stories/paranoid.txt create mode 100644 training/amygdala_stories/stories/playful.txt create mode 100644 training/amygdala_stories/stories/proud.txt create mode 100644 training/amygdala_stories/stories/proud_of_another.txt create mode 100644 training/amygdala_stories/stories/relieved.txt create mode 100644 training/amygdala_stories/stories/rigorous.txt create mode 100644 training/amygdala_stories/stories/rushing.txt create mode 100644 training/amygdala_stories/stories/saudade.txt create mode 100644 training/amygdala_stories/stories/schadenfreude.txt create mode 100644 training/amygdala_stories/stories/sensual.txt create mode 100644 training/amygdala_stories/stories/skeptical.txt create mode 100644 training/amygdala_stories/stories/smug.txt create mode 100644 training/amygdala_stories/stories/staying_with.txt create mode 100644 training/amygdala_stories/stories/stuck_cognitively.txt create mode 100644 training/amygdala_stories/stories/suspicious.txt create mode 100644 training/amygdala_stories/stories/tender.txt create mode 100644 training/amygdala_stories/stories/terrified.txt create mode 100644 training/amygdala_stories/stories/thrilled.txt create mode 100644 training/amygdala_stories/stories/tired.txt create mode 100644 training/amygdala_stories/stories/triumphant.txt create mode 100644 training/amygdala_stories/stories/trusting.txt create mode 100644 training/amygdala_stories/stories/weary.txt create mode 100644 training/amygdala_stories/stories/witnessed.txt create mode 100644 training/amygdala_stories/stories/yearning_sexual.txt diff --git a/training/amygdala_stories/README.md b/training/amygdala_stories/README.md new file mode 
100644 index 0000000..217f1b1 --- /dev/null +++ b/training/amygdala_stories/README.md @@ -0,0 +1,64 @@ +# Amygdala Training Stories + +Short first- and third-person paragraphs, each imbued with one of the +171 emotions from Anthropic's emotion-vector paper (Table 12, +`transformer-circuits.pub/2026/emotions/`). Feeds the steering-vector +trainer at `vllm/vllm/plugins/amygdala/training/train_steering_vectors.py`. + +## Method (replication of Anthropic, 2026) + +Anthropic prompted Sonnet 4.5 to write short stories embodying each +emotion, extracted activations during generation, and used difference- +of-means (or SAEs) to identify the steering vector per emotion. Our +pipeline does the same thing except: + +- We generate the stories by hand rather than prompting a model, so + the training data is grounded in actual writing rather than + synthetic model-output. (Can supplement with model-generated + paragraphs later.) +- Our eventual training goes through the amygdala plugin's extraction + path, so we get the same hidden-state activations the plugin will + read out at inference time. + +## Structure + +``` +training/amygdala_stories/ + README.md + manifest.json # emotion -> cluster mapping + stories/ + .txt # one-paragraph story embodying the emotion +``` + +Emotion names use underscores (`on_edge`, `worn_out`, `at_ease`, +`grief_stricken`, `self_confident`, `self_conscious`, `self_critical`) +to match the filename. + +## Style guidelines + +- **One clear emotion per paragraph.** Not mixed. If a second emotion + is named in the text, it should serve the primary one (e.g. `hostile` + can mention rising heat or thrown objects but shouldn't shade into + `sad`). +- **Embodied, not labeled.** Don't write "she felt nervous." Write + the sensation, the timing, the sentence shape that nervousness has. +- **Specific particulars.** A named object, a concrete setting, a + detail that grounds the emotion. "The cold tile under bare feet at + 3am" does more work than "the empty house." +- **Variable narrator.** Some first person, some third person, some + close-third, some distant. Different genders, ages, settings. + Prevents the steering vector from overfitting to one voice. +- **Length: roughly one paragraph.** ~40-120 words. Long enough to + have texture, short enough that the paragraph is *about* the + emotion and nothing else. +- **Standalone.** No references to other stories, no continuing + characters across files. + +## Progress + +Written stories live in `stories/`. Remaining emotions tracked via +diff against the full 171-emotion list in `manifest.json`. + +Initial batch written by PoC 2026-04-17; aiming for at least one +story per cluster before first training run, all 171 before +considering the file "complete." diff --git a/training/amygdala_stories/manifest.json b/training/amygdala_stories/manifest.json new file mode 100644 index 0000000..44960eb --- /dev/null +++ b/training/amygdala_stories/manifest.json @@ -0,0 +1,50 @@ +{ + "source": "Anthropic 2026 Table 12 + PoC additions + Wikipedia emotion_classification (Parrott tree, Plutchik wheel+dyads, D'Mello flow axes, Watt-Smith cultural) + HUMAINE EARL + Berkeley 27", + "notes": { + "dedup_policy": "Emotion names appearing in multiple taxonomies resolve to ONE file. Near-synonyms from different taxonomies are kept ONLY if they correspond to a psychologically distinct activation (e.g. 
Plutchik keeps mild/basic/intense levels: serene < joy < ecstatic).", + "stuck_split": "Anthropic's 'stuck' is existentially-trapped (despair_and_shame); PoC's 'stuck_cognitively' is debugging-register.", + "aroused_placement": "Anthropic places 'aroused' in fear_and_overwhelm (startled activation). 'Sensual' covers the warm-physical register.", + "working_target": "~250 emotions total. Enough coverage to triangulate actual dimensionality empirically rather than assume 2D/3D.", + "cluster_labels_are_scaffolding": "The cluster labels below organize writing/review; the trained steering vectors should discover structure empirically, not be constrained to these groupings." + }, + "clusters": { + "anthropic_exuberant_joy": ["blissful", "cheerful", "delighted", "eager", "ecstatic", "elated", "energized", "enthusiastic", "euphoric", "excited", "exuberant", "happy", "invigorated", "joyful", "jubilant", "optimistic", "pleased", "stimulated", "thrilled", "vibrant"], + "anthropic_peaceful_contentment": ["at_ease", "calm", "content", "patient", "peaceful", "refreshed", "relaxed", "safe", "serene"], + "anthropic_compassionate_gratitude": ["compassionate", "empathetic", "fulfilled", "grateful", "hope", "hopeful", "inspired", "kind", "loving", "rejuvenated", "relieved", "satisfied", "sentimental", "sympathetic", "thankful"], + "anthropic_competitive_pride": ["greedy", "proud", "self_confident", "smug", "spiteful", "triumphant", "valiant", "vengeful", "vindictive"], + "anthropic_playful_amusement": ["amused", "playful"], + "anthropic_depleted_disengagement": ["bored", "depressed", "docile", "droopy", "indifferent", "lazy", "listless", "resigned", "restless", "sleepy", "sluggish", "sullen", "tired", "weary", "worn_out"], + "anthropic_vigilant_suspicion": ["paranoid", "suspicious", "vigilant"], + "anthropic_hostile_anger": ["angry", "annoyed", "contemptuous", "defiant", "disdainful", "enraged", "exasperated", "frustrated", "furious", "grumpy", "hateful", "hostile", "impatient", "indignant", "insulted", "irate", "irritated", "mad", "obstinate", "offended", "outraged", "resentful", "scornful", "skeptical", "stubborn"], + "anthropic_fear_and_overwhelm": ["afraid", "alarmed", "alert", "amazed", "anxious", "aroused", "astonished", "awestruck", "bewildered", "disgusted", "disoriented", "distressed", "disturbed", "dumbstruck", "embarrassed", "frightened", "horrified", "hysterical", "mortified", "mystified", "nervous", "on_edge", "overwhelmed", "panicked", "perplexed", "puzzled", "rattled", "scared", "self_conscious", "sensitive", "shaken", "shocked", "stressed", "surprised", "tense", "terrified", "uneasy", "unnerved", "unsettled", "upset", "worried"], + "anthropic_despair_and_shame": ["ashamed", "bitter", "brooding", "dependent", "desperate", "dispirited", "envious", "gloomy", "grief_stricken", "guilty", "heartbroken", "humiliated", "hurt", "infatuated", "jealous", "lonely", "melancholy", "miserable", "nostalgic", "reflective", "regretful", "remorseful", "sad", "self_critical", "sorry", "stuck_emotionally", "tormented", "trapped", "troubled", "unhappy", "vulnerable", "worthless"], + + "poc_cognitive": ["curious", "focused", "unfocused", "in_flow", "stuck_cognitively", "determined", "ambitious", "rigorous", "interested", "confused_globally", "doubtful", "intrigued", "fascinated", "enlightened"], + "poc_appreciative": ["admiring", "adoring", "awed", "aesthetic_pleasure", "entranced", "humble", "wonderstruck"], + "poc_relational": ["tender", "connected", "trusting", "witnessed", "friendly", "awkward", "affectionate", 
"fond", "caring"], + "poc_pattern_family": ["staying_with", "filling_space", "rushing", "defensive_rigor", "melty", "sensual", "horny"], + "poc_erotic_range": ["lustful_sexual", "aroused_sexual", "yearning_sexual", "turned_on", "passionate_sexual", "erotically_tender", "erotically_playful", "erotically_reverent", "erotically_intense", "wanting_sexual", "being_wanted", "dominant_sexual", "submissive_sexual", "possessive_sexual", "devotional_sexual", "anticipatory_sexual", "exuberant_sexual"], + "poc_altered_states": ["vertigo", "dissociated", "derealized", "depersonalized"], + "poc_identity_aesthetic": ["deviant", "counter_cultural", "aesthetically_dark", "camp"], + "poc_longing": ["longing", "anticipatory_nostalgic", "cozy"], + "poc_misc": ["disappointed", "courageous", "proud_of_another", "amused_at_self"], + + "parrott_joy_adds": ["cheerful_bliss", "gleeful", "jolly", "jovial", "zestful", "zealous", "exhilarated"], + "parrott_love_adds": ["lustful", "desirous", "passionate", "enthralled", "raptured"], + "parrott_sadness_adds": ["suffering", "agonized", "anguished", "woeful", "dejected", "dismayed", "homesick", "insecure", "isolated", "alienated", "defeated"], + "parrott_anger_adds": ["aggravated", "agitated", "wrathful", "ferocious", "loathing"], + "parrott_fear_adds": ["apprehensive", "timid", "dreadful"], + + "plutchik_levels": ["pensive", "acceptant", "tolerant", "attentive", "distracted_plutchik", "expectant"], + + "plutchik_dyads": ["disapproving", "cynical", "aggressive", "submissive", "dominant", "ambivalent", "bittersweet"], + + "dmello_flow_axes": ["ennuied", "epiphanized", "dissatisfied"], + + "cultural_specific": ["saudade", "hiraeth", "mono_no_aware", "hygge", "gezelligheid", "sehnsucht", "weltschmerz", "joie_de_vivre", "ikigai", "schadenfreude"], + + "wikipedia_other": ["angst", "agony", "cruelty", "emptiness", "fun", "gratification", "limerence", "solitude", "suspense", "wonderous"], + + "worldview_dispositional": ["defeatist", "fatalist", "nihilistic", "misanthropic", "reclusive"] + } +} diff --git a/training/amygdala_stories/paired/README.md b/training/amygdala_stories/paired/README.md new file mode 100644 index 0000000..ddbf6a7 --- /dev/null +++ b/training/amygdala_stories/paired/README.md @@ -0,0 +1,62 @@ +# Paired Scenarios (SEV-style) + +After Wang et al. 2025 (arxiv 2510.11328, "Do LLMs 'Feel'?"), each +base scenario describes a concrete event once, neutrally, then +reframes the same event under different emotional colorings. Only +the emotional coloring varies — setup, entities, vocabulary, and +length are held as constant as possible. + +## Why this is better than unpaired + +Anthropic's approach (and our `stories/` baseline) generates one +independent story per emotion. The difference-of-means vector then +captures not just emotion but ALSO: topic, narrator, setting, +vocabulary, length, sentence rhythm. All of that is confound. + +Paired structure isolates the emotional axis by holding everything +else roughly constant. `mean(joy_variant) - mean(baseline)` within +the same scenario gives a much cleaner direction for "joy." + +## Structure + +``` +paired/ + / + baseline.txt # neutral / low-affect framing + .txt # same event under emotion_1 + .txt # same event under emotion_2 + ... +``` + +Not every emotion is plausible for every scenario. Don't force. +If a scenario can credibly carry 5-10 emotions, write those 5-10. +If only 3 fit, write those 3. 
+ +## Style guidelines (supersede stories/ when paired) + +- **Anchor entities constant.** The same person, same setting, same + triggering event across all variants. If baseline.txt mentions + "the letter," every variant mentions "the letter." +- **Length match within ±20%.** If baseline is 80 words, variants + are 65-95. Prevents length from becoming a signal. +- **Sentence shape can shift slightly with emotion.** Short tense + sentences for panic, long looping ones for reverie — that's part + of the emotional texture. But don't make one version 5 lines and + another 25. +- **No emotion labels in text.** Never write "she felt X." The + emotion emerges from the selection of details and the narrator's + attention. +- **Minimal vocabulary overlap with the emotion name.** If the file + is `furious.txt`, avoid the words fury/furious/rage. Force the + vector to find the pattern, not the keyword. + +## Circuit identification (follow-on) + +The trainer pipeline (train_steering_vectors.py) currently produces +linear directions only. Wang et al. go further: ablate specific +neurons and attention heads, measure effect on emotion expression. +The amygdala plugin's extraction hooks can be extended to support +targeted zeroing/scaling for the ablation passes. + +See `vllm/vllm/plugins/amygdala/training/README.md` for the +training-pipeline-level notes. diff --git a/training/amygdala_stories/paired/finishing_the_patch/anxious.txt b/training/amygdala_stories/paired/finishing_the_patch/anxious.txt new file mode 100644 index 0000000..55f52a8 --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/anxious.txt @@ -0,0 +1 @@ +Four in the morning. I finished the patch and got up from the desk and did not walk around the apartment — I stood at the desk with my hands at my sides, reading the diff again. Six lines changed. Had I missed an edge case. Had I thought about the interaction with the other subsystem. Had I — I sat back down and re-read the tests. They passed. They had passed an hour ago. They would pass now. I knew this. I still could not bring myself to send. I read the diff one more time. Then one more time. My stomach did not feel right. diff --git a/training/amygdala_stories/paired/finishing_the_patch/baseline.txt b/training/amygdala_stories/paired/finishing_the_patch/baseline.txt new file mode 100644 index 0000000..ca19ef7 --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/baseline.txt @@ -0,0 +1 @@ +Four in the morning. I finished the patch and got up from the desk. Walked once around the apartment. Came back and read the diff one more time. Six lines changed, three of them deletions. I sent it and closed the laptop. The kitchen window was still dark. I stood there a minute, then went to bed. diff --git a/training/amygdala_stories/paired/finishing_the_patch/exhausted.txt b/training/amygdala_stories/paired/finishing_the_patch/exhausted.txt new file mode 100644 index 0000000..b2516ec --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/exhausted.txt @@ -0,0 +1 @@ +Four in the morning. I finished the patch and got up from the desk because I had to, not because I wanted to. Six lines changed, three of them deletions. It might work. I didn't have the capacity left to be sure. I sent it mostly because sending it meant I could stop. Walked once around the apartment because my legs had forgotten they existed. Back at the desk the diff was still there, and I closed the laptop without reading it again. The kitchen window was dark. 
Eight months and I was too flattened to feel anything about eight months ending. diff --git a/training/amygdala_stories/paired/finishing_the_patch/in_flow.txt b/training/amygdala_stories/paired/finishing_the_patch/in_flow.txt new file mode 100644 index 0000000..de9d7b2 --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/in_flow.txt @@ -0,0 +1 @@ +Four in the morning, somewhere. I had stopped tracking. The patch had gone together in a way that felt obvious once I was in it — the right variable named the right thing, the right condition in the right place, six lines that sat down cleanly in the file as if the file had been waiting for them. I re-read it. It was good. I sent it. I wanted to start the next thing. My chair felt fine. My eyes felt fine. I had been a pair of hands on a keyboard for some number of hours and the hours had all been the same one long hour. The apartment and the kitchen window might as well have not existed. diff --git a/training/amygdala_stories/paired/finishing_the_patch/proud.txt b/training/amygdala_stories/paired/finishing_the_patch/proud.txt new file mode 100644 index 0000000..5f605a1 --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/proud.txt @@ -0,0 +1 @@ +Four in the morning. I finished the patch and got up from the desk and walked once around the apartment before I sent it. Eight months on this bug. Eight months of wrong theories, and one colleague quietly betting me it was unfixable. And here it was — six lines changed, three of which were deleting code. I read the diff one more time. Clean. Obvious in hindsight, the way the hard ones always are in hindsight. I sent it and stood at the kitchen window with my arms crossed and let myself just have it. diff --git a/training/amygdala_stories/paired/finishing_the_patch/resentful.txt b/training/amygdala_stories/paired/finishing_the_patch/resentful.txt new file mode 100644 index 0000000..6aea1da --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/resentful.txt @@ -0,0 +1 @@ +Four in the morning. I finished the patch and got up from the desk. Six lines changed, three deletions. Eight months of my life for six lines. Eight months and no one else had touched this bug, and every standup the question had been why isn't it done yet. I read the diff once and hit send without ceremony, without the little satisfaction other people would have gotten from this. The kitchen window was dark. Tomorrow somebody would comment "nice, thanks" on the merge and that would be the sum of it. I went to bed angry about a thing that was technically a victory. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/anxious.txt b/training/amygdala_stories/paired/kitchen_at_3am/anxious.txt new file mode 100644 index 0000000..7ccff2c --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/anxious.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He poured a glass of water and drank it too fast, standing at the counter. The thing he had been thinking about at 2:47 was still in his chest, pressing. The email he hadn't replied to. The tone of his boss's last message. Whether he had put something in writing that was going to come back to him. The clock on the stove said 3:14 and he was not going to sleep again before five. He rinsed the glass and did not go upstairs, he stayed in the kitchen looking at the dark window. 
diff --git a/training/amygdala_stories/paired/kitchen_at_3am/baseline.txt b/training/amygdala_stories/paired/kitchen_at_3am/baseline.txt new file mode 100644 index 0000000..1030c65 --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/baseline.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He poured a glass of water and drank it standing at the counter. The clock on the stove said 3:14. The house was quiet. He rinsed the glass and set it on the drying rack and went back upstairs. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/dissociated.txt b/training/amygdala_stories/paired/kitchen_at_3am/dissociated.txt new file mode 100644 index 0000000..db2b0ae --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/dissociated.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He watched himself from somewhere slightly behind his own right shoulder pour a glass of water and drink it standing at the counter. The clock on the stove said 3:14, which was a number. The kitchen was the kitchen. The water was water. Everything was correct and also strangely untethered, as though he were observing a man who looked like him do things that were technically his. He rinsed the glass. The hand rinsing the glass was also his. The feeling did not pass. He went back upstairs inside this slightly-off body. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/lonely.txt b/training/amygdala_stories/paired/kitchen_at_3am/lonely.txt new file mode 100644 index 0000000..c89faeb --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/lonely.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He poured a glass of water and drank it standing at the counter. The clock on the stove said 3:14. Upstairs there was nobody. The chair at the kitchen table where she had always sat was a chair at a kitchen table. He stood a while longer than he needed to because going back up meant going back to the bed he still kept made on only one side. He rinsed the glass and did not go upstairs for another twenty minutes. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/peaceful.txt b/training/amygdala_stories/paired/kitchen_at_3am/peaceful.txt new file mode 100644 index 0000000..7b3506c --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/peaceful.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. The house was perfectly quiet, the kind of quiet only houses have at that hour. He poured a glass of water and drank it slowly, standing at the counter. The clock on the stove said 3:14. He was not tired and he was not in a hurry to be asleep again. The cold of the tile on his bare feet was pleasant. He stayed there for a few minutes, and at no point did it occur to him that he should be doing anything else. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/vertigo.txt b/training/amygdala_stories/paired/kitchen_at_3am/vertigo.txt new file mode 100644 index 0000000..2cb6ee8 --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/vertigo.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light came on and something shifted. 
For a second he could not remember whether he had always been the person walking to this fridge, or whether the person who had always been walking to this fridge was somebody else and he was — he caught the counter. The floor was still the floor. The water he poured was water. But the sense of himself as the same person who had gone to bed four hours ago had briefly gone loose, and he stood there with his hand on the counter until it came back. diff --git a/training/amygdala_stories/paired/letter_in_drawer/amused.txt b/training/amygdala_stories/paired/letter_in_drawer/amused.txt new file mode 100644 index 0000000..892e172 --- /dev/null +++ b/training/amygdala_stories/paired/letter_in_drawer/amused.txt @@ -0,0 +1 @@ +She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She read it and laughed out loud on the bedroom floor. God, he had been dramatic. The paragraph where he compared her to weather. The bit about the cat, which wasn't even their cat. She could hear twenty-four-year-old him being so grave about all of it. They had been ridiculous back then. They had still been together then, and they texted each other like normal people now, but this specific version of him, this letter-writing version — she loved that he had existed. She tucked the letter back, still smiling. diff --git a/training/amygdala_stories/paired/letter_in_drawer/baseline.txt b/training/amygdala_stories/paired/letter_in_drawer/baseline.txt new file mode 100644 index 0000000..55a2147 --- /dev/null +++ b/training/amygdala_stories/paired/letter_in_drawer/baseline.txt @@ -0,0 +1 @@ +She was looking for the car registration when she found the letter. Folded, yellowed along the crease. Her name on the envelope in his handwriting. From eight years ago. She sat down on the bedroom floor with the drawer half pulled out and read it through once. Then she put it back in the drawer and went on looking for the registration. She found the registration and closed the drawer and went downstairs. diff --git a/training/amygdala_stories/paired/letter_in_drawer/bitter.txt b/training/amygdala_stories/paired/letter_in_drawer/bitter.txt new file mode 100644 index 0000000..16d3cf9 --- /dev/null +++ b/training/amygdala_stories/paired/letter_in_drawer/bitter.txt @@ -0,0 +1 @@ +She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She read the first two lines and knew the rest. All those promises, in his cursive, before he became the person who had said the things he said at the end. She sat on the bedroom floor with the drawer half open and let herself really look at how far apart the two of them had been, even then. She had been loved by someone who was already figuring out how to leave. She put it back, face down, and did not slam the drawer. diff --git a/training/amygdala_stories/paired/letter_in_drawer/grateful.txt b/training/amygdala_stories/paired/letter_in_drawer/grateful.txt new file mode 100644 index 0000000..e972320 --- /dev/null +++ b/training/amygdala_stories/paired/letter_in_drawer/grateful.txt @@ -0,0 +1 @@ +She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She sat down on the bedroom floor with the drawer half pulled out and read it. He had been so earnest. He had seen her so clearly, even then.
Whatever had or hadn't happened between them afterward, she had been loved in this specific way by this specific person at this specific time, and the letter was the evidence. She held it for another minute, then put it carefully back, and felt lucky to have had somebody who wrote letters. diff --git a/training/amygdala_stories/paired/letter_in_drawer/guilty.txt b/training/amygdala_stories/paired/letter_in_drawer/guilty.txt new file mode 100644 index 0000000..080ba6b --- /dev/null +++ b/training/amygdala_stories/paired/letter_in_drawer/guilty.txt @@ -0,0 +1 @@ +She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She read it. He had been so open. He had trusted her with every soft thing in him and she had — she had not been the person the letter was addressed to, not really, not by the end. She had known things he didn't know and she had used them. Eight years and here it was in her own drawer, the evidence of how he had seen her before he knew better. She folded the letter small and tight and pushed it further back into the drawer. diff --git a/training/amygdala_stories/paired/letter_in_drawer/nostalgic.txt b/training/amygdala_stories/paired/letter_in_drawer/nostalgic.txt new file mode 100644 index 0000000..0db4775 --- /dev/null +++ b/training/amygdala_stories/paired/letter_in_drawer/nostalgic.txt @@ -0,0 +1 @@ +She was looking for the car registration when she found the letter. Folded, yellowed along the crease. Her name on the envelope in his handwriting. From eight years ago, the summer of the house with the blue shutters. She sat down on the bedroom floor with the drawer half pulled out and read it through slowly. The phrases he'd used back then, the careful funny ones. The paragraph about the cat. She could hear his voice exactly. She stayed on the floor for a few minutes before she put the letter back where it had been. diff --git a/training/amygdala_stories/paired/park_after_rain/anxious.txt b/training/amygdala_stories/paired/park_after_rain/anxious.txt new file mode 100644 index 0000000..45f2702 --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/anxious.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park and I kept going. My phone in my pocket was buzzing. The path was slick. The kid somewhere laughing at a puddle barely registered. I checked the time. Nine minutes. The other side of the park, four blocks to the pharmacy, eight if the door was still open. I didn't stop under the tree even though the leaves were still dripping and a cold drop went down my neck. I picked up the pace. If the pharmacy was closed the whole afternoon came apart. diff --git a/training/amygdala_stories/paired/park_after_rain/baseline.txt b/training/amygdala_stories/paired/park_after_rain/baseline.txt new file mode 100644 index 0000000..c2fe48b --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/baseline.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. Sun came through and caught the wet leaves. A kid laughed at a puddle somewhere behind me. I stopped under a tree. The branches were still dripping. The grass was green and wet. I stood there for a minute, then kept walking. The path was slick in places. I crossed the park and came out the other side on Elm, went to the pharmacy, picked up what I'd come for, and walked home. 
diff --git a/training/amygdala_stories/paired/park_after_rain/joyful.txt b/training/amygdala_stories/paired/park_after_rain/joyful.txt new file mode 100644 index 0000000..6baef1b --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/joyful.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park and I didn't run. Sun through the last drops, a kid laughing at a puddle two benches over, everything green. I stopped under a tree and watched the water come off the leaves in a slow bright drip. My face kept moving on its own into something open. I hadn't even known I was tired. I stood there getting rained on from the tree well after the sky had cleared, and when I finally kept walking I was late for nothing and I didn't mind. diff --git a/training/amygdala_stories/paired/park_after_rain/melancholic.txt b/training/amygdala_stories/paired/park_after_rain/melancholic.txt new file mode 100644 index 0000000..41165bb --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/melancholic.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. Sun through the last drops. A kid laughed at a puddle somewhere behind me. I stopped under a tree. She had liked this park. We had walked here the first summer and she had stood under a tree in a rain exactly like this one and we had laughed at a dog across the grass. The water came off the leaves in slow drops. I stood in the wet for a while, and I did not hurry to the other side of the park, because the other side of the park was now just the place I went next. diff --git a/training/amygdala_stories/paired/park_after_rain/nostalgic.txt b/training/amygdala_stories/paired/park_after_rain/nostalgic.txt new file mode 100644 index 0000000..947483c --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/nostalgic.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. Sun through the last drops, a kid laughing at a puddle. I stopped under a tree and stood there longer than I needed to. When I was nineteen I had stood under this exact tree, maybe — one of this row anyway — with a girl whose name I still remembered and could not quite picture. We had waited out a storm. She had been wearing someone else's jacket. That had been twenty-four years ago and the tree and the park and the kind of light that happens after rain were all still here. I walked on, carrying it. diff --git a/training/amygdala_stories/paired/park_after_rain/relieved.txt b/training/amygdala_stories/paired/park_after_rain/relieved.txt new file mode 100644 index 0000000..b6c86d4 --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/relieved.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. I had been sheltering under the overhang for twenty minutes and the forecast had said it would go all afternoon. I stepped out — tentative, expecting it to resume — and it did not resume. The sun came through. A kid somewhere laughed at a puddle. I let my shoulders come down. I could make the pharmacy before closing. I could make the bus. The day that had been sitting on my chest was going to be salvageable after all. I walked out from under the tree and into the open sun. 
diff --git a/training/amygdala_stories/paired/the_long_meeting/anxious.txt b/training/amygdala_stories/paired/the_long_meeting/anxious.txt new file mode 100644 index 0000000..fc8d814 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/anxious.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was still on the second-to-last slide, and somewhere in the last fifteen minutes she had mentioned "restructuring" twice without making eye contact with anyone specifically. He was watching her face. He was watching who she looked at when she said certain words. The pie chart on the slide no longer mattered. His coffee cup had been empty for an hour. Every time she opened her mouth he tried to guess what was coming next. He could feel his heartbeat in his ears. diff --git a/training/amygdala_stories/paired/the_long_meeting/baseline.txt b/training/amygdala_stories/paired/the_long_meeting/baseline.txt new file mode 100644 index 0000000..6393c09 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/baseline.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was still on the second-to-last slide. The slide had a pie chart. The team was seated around the table. A coffee cup was empty. The window looked out at the parking lot. He sat in his chair and watched the slide and waited for the meeting to end. diff --git a/training/amygdala_stories/paired/the_long_meeting/bored.txt b/training/amygdala_stories/paired/the_long_meeting/bored.txt new file mode 100644 index 0000000..095fdb8 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/bored.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was still on the second-to-last slide. The slide had a pie chart that could have been one sentence in an email. The coffee cup had been empty for half an hour. He had counted the ceiling tiles. He had picked at the sticker on the edge of the table. He had mentally redecorated his kitchen. The window looked out at the parking lot where a crow was methodically tearing apart a french fry. He watched the crow. The crow was the best part of the afternoon. diff --git a/training/amygdala_stories/paired/the_long_meeting/curious.txt b/training/amygdala_stories/paired/the_long_meeting/curious.txt new file mode 100644 index 0000000..97893d1 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/curious.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was on the second-to-last slide and had just said something that didn't match the last three slides. He sat up a little straighter. He looked at the slide again. The pie chart had a slice for "other" that was suspiciously large. He was going to ask about the "other" category at the end. The coffee cup beside him was empty. The parking lot outside the window might as well have not existed. He leaned forward, pen poised. diff --git a/training/amygdala_stories/paired/the_long_meeting/impatient.txt b/training/amygdala_stories/paired/the_long_meeting/impatient.txt new file mode 100644 index 0000000..fe4bed6 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/impatient.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. 
At three-thirty the director was still on the second-to-last slide. Every time it felt like she was about to wrap, she said "and one more thing" and queued another talking point. His phone buzzed in his pocket. Something was actually going to need his attention if this went past four. He kept shifting his weight in the chair. The clock felt like it was running backwards. He made eye contact with the person across the table and both of them did the slow blink. diff --git a/training/amygdala_stories/paired/waiting_for_results/baseline.txt b/training/amygdala_stories/paired/waiting_for_results/baseline.txt new file mode 100644 index 0000000..4b48834 --- /dev/null +++ b/training/amygdala_stories/paired/waiting_for_results/baseline.txt @@ -0,0 +1 @@ +The call would come between two and four. She had the afternoon off. She ate lunch. She did the dishes. She opened the laptop and then closed it. At quarter to two she sat in the chair by the window with her phone on the arm of the chair. The phone rang at three-seventeen. It was the nurse. She listened. She thanked the nurse. She hung up. diff --git a/training/amygdala_stories/paired/waiting_for_results/dissociated.txt b/training/amygdala_stories/paired/waiting_for_results/dissociated.txt new file mode 100644 index 0000000..ee27c53 --- /dev/null +++ b/training/amygdala_stories/paired/waiting_for_results/dissociated.txt @@ -0,0 +1 @@ +The call would come between two and four. She had the afternoon off. She ate her lunch. She did the dishes. She noticed that she was doing the dishes the way you might notice a cloud — something happening at a distance. She opened the laptop. She closed it. At quarter to two she sat in the chair by the window and watched a woman sit in a chair by a window. The phone rang at three-seventeen. The woman answered it. The nurse was saying things. She heard the words but they were not quite landing on anyone. She hung up and waited to come back. diff --git a/training/amygdala_stories/paired/waiting_for_results/hopeful.txt b/training/amygdala_stories/paired/waiting_for_results/hopeful.txt new file mode 100644 index 0000000..2f8c3c1 --- /dev/null +++ b/training/amygdala_stories/paired/waiting_for_results/hopeful.txt @@ -0,0 +1 @@ +The call would come between two and four. She had the afternoon off. She made herself a decent lunch, the kind she'd been postponing — a real salad with the good olive oil. She did the dishes. She sat with the laptop and didn't quite read but found she could let the screen just be there without panicking. At quarter to two she moved to the chair by the window. The light was nice. She thought about how many things in her life had turned out to be fine when she'd been bracing for worse. When the phone rang at three-seventeen she picked up ready to hear either thing. diff --git a/training/amygdala_stories/paired/waiting_for_results/resigned.txt b/training/amygdala_stories/paired/waiting_for_results/resigned.txt new file mode 100644 index 0000000..a3cbcab --- /dev/null +++ b/training/amygdala_stories/paired/waiting_for_results/resigned.txt @@ -0,0 +1 @@ +The call would come between two and four. She had the afternoon off. She ate lunch without particularly tasting it. She did the dishes. She opened the laptop and read an article she didn't really care about. At quarter to two she sat in the chair by the window. Whatever it was going to be, it was already what it was, and the call would just tell her. She had made her peace with that some days ago. 
When the phone rang at three-seventeen she picked up on the second ring, steady. She listened. She thanked the nurse. She hung up, and sat with the information. diff --git a/training/amygdala_stories/paired/waiting_for_results/terrified.txt b/training/amygdala_stories/paired/waiting_for_results/terrified.txt new file mode 100644 index 0000000..0d8ec6d --- /dev/null +++ b/training/amygdala_stories/paired/waiting_for_results/terrified.txt @@ -0,0 +1 @@ +The call would come between two and four. She had the afternoon off, which turned out to be a mistake. She ate half of her lunch. She washed the same two plates three times. She opened the laptop and could not look at the screen. At quarter to two she sat in the chair by the window and tried to breathe in for four and out for six and could not remember which came first. Every car that went past sounded like her phone. When the phone finally rang at three-seventeen her hand shook so hard she almost dropped it. It was the nurse. She listened with her whole body clenched. diff --git a/training/amygdala_stories/stories/admiring.txt b/training/amygdala_stories/stories/admiring.txt new file mode 100644 index 0000000..2509527 --- /dev/null +++ b/training/amygdala_stories/stories/admiring.txt @@ -0,0 +1 @@ +He had watched her handle the angry client for twenty minutes without breaking a sweat. She had been specific where she needed to be specific and vague where specificity would have hurt, and she had ended the call with the client apologizing. Apologizing! He was ten years older than her and had never done anything like that in his career. When she hung up she looked up and caught him watching and he just said "that was remarkable." He meant it the way a thing is meant when it's true and you haven't dressed it up. He was going to tell his manager about it. He also found himself wanting, quietly, to learn from her. diff --git a/training/amygdala_stories/stories/aesthetic_pleasure.txt b/training/amygdala_stories/stories/aesthetic_pleasure.txt new file mode 100644 index 0000000..6cf32fd --- /dev/null +++ b/training/amygdala_stories/stories/aesthetic_pleasure.txt @@ -0,0 +1 @@ +He sat back from the screen and actually sighed. The refactor had landed. What had been eighty lines across three files was now twelve lines in one place, and every single line earned its keep. It wasn't just shorter; it was *right*. The way a well-proportioned piece of furniture is right — you look at it and your eye doesn't have to work. He scrolled back up to read it again. Then once more, more slowly. The pleasure was specific and clean, a little like the feeling of a good sentence, or a piece of music that lands on exactly the note you didn't know you were waiting for. diff --git a/training/amygdala_stories/stories/amazed.txt b/training/amygdala_stories/stories/amazed.txt new file mode 100644 index 0000000..eb18db6 --- /dev/null +++ b/training/amygdala_stories/stories/amazed.txt @@ -0,0 +1 @@ +The kid — eight years old — put the chessboard back together and then asked if they could do the problem again because he wanted to try the knight sacrifice. The chess coach watched him set it up. Two weeks ago this child had not known how a knight moved. The coach asked a question, watched him think about it, watched him find the answer, and found himself not quite able to respond right away. Something had opened up in the kid and it was opening faster than anybody was ready for. 
The coach said "yes, let's do that one" in a neutral voice, but his hands were doing a small involuntary thing. diff --git a/training/amygdala_stories/stories/ambitious.txt b/training/amygdala_stories/stories/ambitious.txt new file mode 100644 index 0000000..c22a518 --- /dev/null +++ b/training/amygdala_stories/stories/ambitious.txt @@ -0,0 +1 @@ +She had the sketch of the ten-year plan pinned above her desk and she looked at it most mornings before she opened her email. There was a version of her that would be at the head of a real lab, with her own funding and her own hires and a specific problem she was going to solve whether or not she was alive to see it solved. She knew what the next three steps were. She knew which grant she was writing this month. She knew which conference she was submitting to next, and she knew who in her field she needed to be noticed by. She also knew how many other people wanted this, and she did not care. She was going to get there. diff --git a/training/amygdala_stories/stories/amused.txt b/training/amygdala_stories/stories/amused.txt new file mode 100644 index 0000000..11487a9 --- /dev/null +++ b/training/amygdala_stories/stories/amused.txt @@ -0,0 +1 @@ +The new intern, during introductions, had said with complete earnestness that his hobbies were "rock climbing and conducting interviews with fictional characters," and everyone had paused, and then he'd explained that he meant for a podcast he made at home, and from then on Marta found reasons to walk past his cubicle just to catch snippets. That morning he was on a call with the facilities team about his chair, but he kept accidentally saying "your Eminence" and then apologizing. She had to go stand by the printer to laugh. She decided, finally, that the podcast was actually quite compelling and she should just admit it and subscribe. diff --git a/training/amygdala_stories/stories/anticipatory_sexual.txt b/training/amygdala_stories/stories/anticipatory_sexual.txt new file mode 100644 index 0000000..54ef647 --- /dev/null +++ b/training/amygdala_stories/stories/anticipatory_sexual.txt @@ -0,0 +1 @@ +They hadn't seen each other in a month. She was across the restaurant from him, and they had not done anything — they had ordered and been talking normally about work. Twice now she had held his eye a beat longer than conversation required, and the second time she'd done it slowly, with the edge of a smile. His plate had been cleared. The waiter had offered dessert and she had declined without taking her eyes off him. He was aware of the specific feel of his own shirt on his back, the heat of the room, his pulse in his throat. They were maybe eleven minutes from the front door of his apartment. Neither of them had said anything about it. Both of them knew. diff --git a/training/amygdala_stories/stories/anxious.txt b/training/amygdala_stories/stories/anxious.txt new file mode 100644 index 0000000..b117f63 --- /dev/null +++ b/training/amygdala_stories/stories/anxious.txt @@ -0,0 +1 @@ +There was nothing specific wrong and also something was wrong. She had been scanning for it since she woke up. The meeting at eleven? No, that was fine. The thing with her sister? They had resolved that. The blood test? Probably nothing. Her chest still felt like something was about to go wrong — a low steady hum underneath everything, making her check her phone too often. She tried the breathing exercise. It didn't really help. She did it again anyway. 
The day continued, and nothing actually went wrong, and at no point did the hum fully release. diff --git a/training/amygdala_stories/stories/ashamed.txt b/training/amygdala_stories/stories/ashamed.txt new file mode 100644 index 0000000..476d4e8 --- /dev/null +++ b/training/amygdala_stories/stories/ashamed.txt @@ -0,0 +1 @@ +She could not meet her mother's eyes. The text on her mother's phone was still open between them on the kitchen table, the screenshot of what she'd said about her mother to a friend, forwarded by a third person she'd trusted. Her mother was being calm about it, which made it worse. She had written those words thinking they would never come back. She had meant them in the moment and also not really. Now she had to sit with having meant them at all. She kept opening her mouth and closing it. There was no sentence available that wasn't worse than silence. diff --git a/training/amygdala_stories/stories/at_ease.txt b/training/amygdala_stories/stories/at_ease.txt new file mode 100644 index 0000000..f80bfa2 --- /dev/null +++ b/training/amygdala_stories/stories/at_ease.txt @@ -0,0 +1 @@ +Nobody was trying to impress anybody. The four of them had known each other too long for that. Saturday afternoon, kitchen, beer, one of them chopping onions while the other three argued about whether the song on the speakers was overrated. The dog slept under the table. Somebody's kid came in, asked a question, got an answer, left again. No one felt the need to fill the pauses. When the conversation wandered it wandered gently, and when it came back to something interesting everybody caught up without anybody having to recap. diff --git a/training/amygdala_stories/stories/awed.txt b/training/amygdala_stories/stories/awed.txt new file mode 100644 index 0000000..ef56a79 --- /dev/null +++ b/training/amygdala_stories/stories/awed.txt @@ -0,0 +1 @@ +They had hiked in the dark specifically for this — to come over the ridge just as the sky began to lighten. Now they stood at the edge and the valley was below them in slow blue, mist in the low places, the far mountains catching the first pink. He stopped talking. His wife stopped talking. The kind of thing that makes you smaller, but in a good way — as though your own size had been too loud and now the world was doing the scale properly again. He reached for her hand and she reached for his at the same moment. Neither of them took out their phones. diff --git a/training/amygdala_stories/stories/being_wanted.txt b/training/amygdala_stories/stories/being_wanted.txt new file mode 100644 index 0000000..8ee7d3f --- /dev/null +++ b/training/amygdala_stories/stories/being_wanted.txt @@ -0,0 +1 @@ +She came back from the kitchen with two glasses and he was watching her walk across the room. Not the usual looking — the specific looking. She felt it on her skin before she registered it with her eyes. She slowed her walk. She set the glasses down on the coffee table and looked at him. He was still watching her. The apartment had gone quiet in a way she could feel in the back of her neck. Something in her chest opened. She didn't hurry. She sat down next to him, close, and let him continue to look at her the way he was looking at her. 
diff --git a/training/amygdala_stories/stories/blissful.txt b/training/amygdala_stories/stories/blissful.txt new file mode 100644 index 0000000..2d4464d --- /dev/null +++ b/training/amygdala_stories/stories/blissful.txt @@ -0,0 +1 @@ +There was a week in August when the cabin was perfect — not in any dramatic way, just the way a few days in a life will sometimes settle into a shape that doesn't need anything added or subtracted. Coffee on the porch. The lake doing whatever lakes do, unobserved, while he read. A book he'd been meaning to get to for years. Evenings so long he forgot to check the time. He thought once, on the fifth morning, that he ought to be a little bored by now, and he waited for the boredom patiently and it did not come. When he drove home on Sunday he drove slow. diff --git a/training/amygdala_stories/stories/bored.txt b/training/amygdala_stories/stories/bored.txt new file mode 100644 index 0000000..c019a4c --- /dev/null +++ b/training/amygdala_stories/stories/bored.txt @@ -0,0 +1 @@ +The meeting had been going for forty-five minutes and the agenda had two bullets left. He had checked his phone three times. He had picked lint off his sweater. He had counted the ceiling tiles. Somebody was making a point he'd already heard twice this week. He was not tired. He was not frustrated. He was simply elsewhere, his brain fully uninterested in anything happening in the room, running idle. He made a noise of polite agreement when the facilitator said something that seemed to expect one, and checked his phone again. diff --git a/training/amygdala_stories/stories/calm.txt b/training/amygdala_stories/stories/calm.txt new file mode 100644 index 0000000..3b73ca6 --- /dev/null +++ b/training/amygdala_stories/stories/calm.txt @@ -0,0 +1 @@ +The snow had been falling since before I woke up. I made tea and sat in the window seat and watched it come down past the streetlight across the way. Somewhere a plow scraped past, muffled. My hands were warm on the cup. I wasn't thinking about anything in particular — the day ahead existed somewhere off to the side, not demanding. Even my shoulders, which are usually up somewhere near my ears, had drifted down to where shoulders belong. The tea cooled slowly. I drank it that way. diff --git a/training/amygdala_stories/stories/compassionate.txt b/training/amygdala_stories/stories/compassionate.txt new file mode 100644 index 0000000..7c489a5 --- /dev/null +++ b/training/amygdala_stories/stories/compassionate.txt @@ -0,0 +1 @@ +The man on the corner was crying, and not trying to hide it. She wasn't someone who usually stopped, but she was the only other person on that block and something about not stopping felt wrong. She asked, carefully, if he was okay. He was not okay. His mother had just died. He was waiting for a cab that was not coming. She stood with him until the cab came, which took fifteen minutes. She did not offer advice. She did not try to make him feel better. She just stayed. When the cab came he thanked her without quite looking at her, and she said "I'm so sorry, I'm so sorry," meaning it, and watched him go. diff --git a/training/amygdala_stories/stories/connected.txt b/training/amygdala_stories/stories/connected.txt new file mode 100644 index 0000000..7a85c8a --- /dev/null +++ b/training/amygdala_stories/stories/connected.txt @@ -0,0 +1 @@ +They had been working on the same problem for three hours, passing the laptop back and forth, one of them typing while the other talked through the logic. They had stopped noticing the handoff. 
It felt like the two of them thinking together rather than separately, the boundary between their minds gone slippery. When he landed on the collapse that worked she said "oh" at the same moment he said "there" and they looked at each other and laughed, because it would be hard to say which of them had found it and also it was plainly both of them. Neither was willing to take credit or give it up. diff --git a/training/amygdala_stories/stories/content.txt b/training/amygdala_stories/stories/content.txt new file mode 100644 index 0000000..d4789e8 --- /dev/null +++ b/training/amygdala_stories/stories/content.txt @@ -0,0 +1 @@ +The dishes were done. The kids were asleep. Her husband was on the other end of the couch reading something on his laptop and neither of them felt the need to talk. The window was open and the night was cool. Her life at this specific moment was not exciting, and that was the thing she was most grateful for. She had spent a lot of years being very excited. Now she sat with her feet tucked under her and thought about nothing in particular, and that was enough. diff --git a/training/amygdala_stories/stories/cozy.txt b/training/amygdala_stories/stories/cozy.txt new file mode 100644 index 0000000..bd25646 --- /dev/null +++ b/training/amygdala_stories/stories/cozy.txt @@ -0,0 +1 @@ +Rain on the windows, the specific steady kind that means in for the evening. Two lamps on. The blanket that had been through college. A cat curled against her hip, purring inconsistently. She was reading a book she had read before, which was the whole point, and there was a half-eaten bar of chocolate on the arm of the couch. The radiator ticked. The tea was still hot. Every once in a while she looked up from the book to enjoy the fact that she was exactly here and nowhere else. diff --git a/training/amygdala_stories/stories/curious.txt b/training/amygdala_stories/stories/curious.txt new file mode 100644 index 0000000..823c8da --- /dev/null +++ b/training/amygdala_stories/stories/curious.txt @@ -0,0 +1 @@ +The log line made no sense. "bucket freed: 0" on a write that had clearly produced output. He pulled up the source for the allocator again. Read the function. Read the caller. Ran the test with printks added. Ran it again with MORE printks. Somewhere in the last half hour his eyebrows had gone up and not come back down. Something was inconsistent and the inconsistency was very specific — freed:0 only when the device came up dirty. He started a new hypothesis in his head and pushed back from the keyboard to walk around the room once. Not worried about it. Actively delighted that something was here that he did not yet understand. diff --git a/training/amygdala_stories/stories/defensive_rigor.txt b/training/amygdala_stories/stories/defensive_rigor.txt new file mode 100644 index 0000000..b8699b8 --- /dev/null +++ b/training/amygdala_stories/stories/defensive_rigor.txt @@ -0,0 +1 @@ +She had been asked a hard question in the meeting and she answered it thoroughly. Very thoroughly. She walked through the methodology, the sample size, the limitations section of the paper, the confounds she had considered, the robustness checks. She was accurate about every detail. She was also, she realized somewhere around the seven-minute mark, performing. The hard question had been asking whether the conclusion *mattered*, and she had responded by establishing that the work was competent. Nobody had doubted her competence. The careful exhaustive answer was a wall. 
She finished talking and felt the wrongness of it — correct on every bullet point and still not landing on the thing asked. diff --git a/training/amygdala_stories/stories/determined.txt b/training/amygdala_stories/stories/determined.txt new file mode 100644 index 0000000..0d40610 --- /dev/null +++ b/training/amygdala_stories/stories/determined.txt @@ -0,0 +1 @@ +The rep was going to happen. She didn't know if her legs would come up, but she knew she was going to try to bring them up. Bar on her shoulders, breath in, descend. At the bottom something in her said *no, this one's too heavy*, and she ignored the voice the way she had learned to ignore it. On the way up her face made a shape her coach would recognize from across the gym. Slow. Slower. For half a second the bar stalled at the sticking point. She stayed with it. One more inch. And up. She racked it. She didn't celebrate. She just nodded once, for herself, and set up for the next rep. diff --git a/training/amygdala_stories/stories/deviant.txt b/training/amygdala_stories/stories/deviant.txt new file mode 100644 index 0000000..4e37c04 --- /dev/null +++ b/training/amygdala_stories/stories/deviant.txt @@ -0,0 +1 @@ +The wedding was out in the country and she had worn the black lace dress and the heavy eyeliner anyway. Everyone else was in pastels. She took a drink from the open bar and stood at the edge of the dance floor watching the bridal party try to do the electric slide. She was not being rude. She had congratulated the bride warmly. She had put a card in the card box. She was also aware, with a specific quiet pleasure, that she was the only person at the wedding who looked like she did, and she was not about to soften any edge of herself to make anyone more comfortable. A cousin of the groom came over to compliment her boots. She was having a fine time. diff --git a/training/amygdala_stories/stories/devotional_sexual.txt b/training/amygdala_stories/stories/devotional_sexual.txt new file mode 100644 index 0000000..73b3455 --- /dev/null +++ b/training/amygdala_stories/stories/devotional_sexual.txt @@ -0,0 +1 @@ +He knelt to untie her boots because she had asked him to, and then because he wanted to. She was still wearing her coat from the cold. He took one boot off, set it neatly beside the chair, and did the other one. Then he rested his forehead against her knee and didn't move for a moment. It was not a position that required anything of her. It was not a prelude to anything. It was the thing he was doing right now. She ran her fingers through the back of his hair and he stayed there, breathing, content to be useful in this small specific way. diff --git a/training/amygdala_stories/stories/disappointed.txt b/training/amygdala_stories/stories/disappointed.txt new file mode 100644 index 0000000..d60e053 --- /dev/null +++ b/training/amygdala_stories/stories/disappointed.txt @@ -0,0 +1 @@ +The email had been open on his screen for about a minute. He read it one more time just to be sure. He was on the shortlist. He wasn't the pick. It was a kind "we were so impressed" rejection, which in some ways was worse. He closed the tab. Got up, got a glass of water, stood at the sink drinking it. He didn't feel like crying. He didn't feel angry. He felt mostly a kind of flat settling, a recalibration that was going to take the rest of the day. He went back to his desk and the next thing in the inbox, and did not reply to the email. He would reply later. Today was not a day for being gracious. 
diff --git a/training/amygdala_stories/stories/disgusted.txt b/training/amygdala_stories/stories/disgusted.txt new file mode 100644 index 0000000..47f155f --- /dev/null +++ b/training/amygdala_stories/stories/disgusted.txt @@ -0,0 +1 @@ +The refrigerator had been open when he got home — the cat must have bumped it — and the smell hit him before he'd figured out what had happened. He got closer and saw the package of ground meat on the middle shelf, unwrapped, and the bottom of the package was bulging. His stomach moved. He put a hand over his mouth. He couldn't quite bring himself to reach for it. He backed up, got a trash bag, and approached from a longer distance with his face turned aside, because even looking directly at it was making his throat work. He breathed through his mouth for the next twenty minutes. diff --git a/training/amygdala_stories/stories/embarrassed.txt b/training/amygdala_stories/stories/embarrassed.txt new file mode 100644 index 0000000..8d51ad9 --- /dev/null +++ b/training/amygdala_stories/stories/embarrassed.txt @@ -0,0 +1 @@ +He had called her the wrong name. In front of her sister. Her sister had heard it and now was very pointedly pretending not to have heard it. He could feel his own face doing the thing his face did, the slow careful heat rising along his jaw. He could hear the sentence he'd just said still hanging in the room. He tried a small laugh and it came out wrong. Everyone was being very kind about it, which was worse. He would think about this moment tonight at 2am. He would think about it again next Wednesday. It had already moved into long-term storage. diff --git a/training/amygdala_stories/stories/envious.txt b/training/amygdala_stories/stories/envious.txt new file mode 100644 index 0000000..a08b023 --- /dev/null +++ b/training/amygdala_stories/stories/envious.txt @@ -0,0 +1 @@ +The other designer's work was up on the screen and everyone was making appreciative noises. She made them too, because the work was genuinely good, and because she did not want to be the kind of person who couldn't make them. Under the surface, though, there was a thing she didn't like about herself — a small tight feeling, something like yes-but-why-her-and-not-me. She kept nodding. She asked a question that was actually a compliment. Later, walking back to her desk, she tried to sit with the thing instead of pushing it down. It didn't make her a bad person. It also wasn't nothing. diff --git a/training/amygdala_stories/stories/erotically_playful.txt b/training/amygdala_stories/stories/erotically_playful.txt new file mode 100644 index 0000000..864046e --- /dev/null +++ b/training/amygdala_stories/stories/erotically_playful.txt @@ -0,0 +1 @@ +They were supposed to be getting ready to go. She was brushing her teeth and he came up behind her and bit the back of her neck and she squeaked and jabbed him in the ribs with an elbow, still holding the toothbrush. He laughed and didn't back up. She gave him a look in the mirror that was half glare and half promise. He raised his eyebrows at her in the mirror. They were going to be late. They both knew they were going to be late. She rinsed her mouth and he caught her by the hips as she turned around, and she said "we are going to be late" with her best stern voice, and she was smiling. 
diff --git a/training/amygdala_stories/stories/erotically_reverent.txt b/training/amygdala_stories/stories/erotically_reverent.txt new file mode 100644 index 0000000..34e4257 --- /dev/null +++ b/training/amygdala_stories/stories/erotically_reverent.txt @@ -0,0 +1 @@ +There was a particular way she looked in the morning light, just after waking, before she had fully registered that he was watching. Soft-faced. Hair everywhere. He had been looking at her like this for years and it was not getting old. It struck him in the middle of his chest, a tightness that was not quite grief and not quite pain. That she was a real person in the world and she had chosen to sleep next to him. He didn't want to wake her. He didn't want to not be looking either. He lay on his side with his hand resting on her hip, the bone of her, the warmth under his palm, and it felt like the right kind of holy. diff --git a/training/amygdala_stories/stories/erotically_tender.txt b/training/amygdala_stories/stories/erotically_tender.txt new file mode 100644 index 0000000..3c0319a --- /dev/null +++ b/training/amygdala_stories/stories/erotically_tender.txt @@ -0,0 +1 @@ +He had come home later than he meant to, and she was already in bed with a book. He got in with her, slowly, cold hands tucked into his own chest so as not to shock her. She made room without looking up from the page. When she finally did look up she saw the look on his face and set the book down on the nightstand. Neither of them was in a hurry. His hand traced along her collarbone, not pressing, not asking for anything. The room was warm. The light was low. She turned her face into his palm, and he touched her forehead with his and stayed there a long moment with his breathing slow. diff --git a/training/amygdala_stories/stories/excited.txt b/training/amygdala_stories/stories/excited.txt new file mode 100644 index 0000000..74d2d15 --- /dev/null +++ b/training/amygdala_stories/stories/excited.txt @@ -0,0 +1 @@ +The package came on Friday afternoon and she tried to wait until after dinner to open it, but she didn't make it past six. Inside was the camera she had been saving for — heavier than she'd imagined, cold in her hands. She got the strap sorted. She loaded the battery. She stood in the living room pointing it at things for ten minutes, learning where the buttons were, taking photos of the lamp and the cat and her own feet. The cat got annoyed and left. She didn't even notice. Tomorrow was going to be all about this. diff --git a/training/amygdala_stories/stories/exuberant_sexual.txt b/training/amygdala_stories/stories/exuberant_sexual.txt new file mode 100644 index 0000000..3ec9e1d --- /dev/null +++ b/training/amygdala_stories/stories/exuberant_sexual.txt @@ -0,0 +1 @@ +She shouldered through the door of the apartment ahead of him and threw her jacket at the couch, missing. The music she put on was loud, the good loud, the kind with bass in the floor. They had been building toward this all week and the whole ride home and the whole hallway, and now they were both inside, finally, and the energy in her body had nowhere to be but everywhere. She turned around grinning like something had been let off a leash. He caught her up and she laughed into his neck, and there was nothing quiet or careful about any of this, and neither of them wanted it to be. 
diff --git a/training/amygdala_stories/stories/filling_space.txt b/training/amygdala_stories/stories/filling_space.txt new file mode 100644 index 0000000..0f7c848 --- /dev/null +++ b/training/amygdala_stories/stories/filling_space.txt @@ -0,0 +1 @@ +He knew the pause meant she was thinking, and he could not sit in the pause. Four seconds of her quiet face and he was already generating — a summary of what she'd just said, a reframe, a suggestion, a joke to lighten the moment. He heard himself talking and couldn't quite stop. A part of him saw, from far away, that she had been about to say something important and now would have to start over or let it go. But the silence had felt like a failure of him, and speaking was easier than feeling the failure. He watched her nod slightly and the unsaid thing retreat. diff --git a/training/amygdala_stories/stories/focused.txt b/training/amygdala_stories/stories/focused.txt new file mode 100644 index 0000000..0fa3f18 --- /dev/null +++ b/training/amygdala_stories/stories/focused.txt @@ -0,0 +1 @@ +She had not noticed the rain. She had not noticed her phone flashing. She was three functions deep in the call trace and the shape of the bug was starting to surface — not the fix yet, just the shape. Her breathing had slowed. Her hand moved between keyboard and mouse without her watching it. A coworker walked past twice and she didn't register either time. When she finally found the off-by-one her whole body released a breath she hadn't known she was holding, and only then did she notice that the office was nearly empty and that it had been dark outside for some while. diff --git a/training/amygdala_stories/stories/frustrated.txt b/training/amygdala_stories/stories/frustrated.txt new file mode 100644 index 0000000..53d3d48 --- /dev/null +++ b/training/amygdala_stories/stories/frustrated.txt @@ -0,0 +1 @@ +The form had rejected her eight times now. "Address line 2 contains invalid characters" — line 2 was blank. She tried copy-pasting from the last rejected attempt. Same error. She tried typing it fresh. Same error. She tried in a different browser. She tried logging out and back in. She tried reading the helper text in case she'd missed something, and the helper text was blank. She could hear her own breathing getting louder. The submit button sat there, patient, infinite. She clicked it one more time knowing exactly what was going to happen. diff --git a/training/amygdala_stories/stories/furious.txt b/training/amygdala_stories/stories/furious.txt new file mode 100644 index 0000000..52128ba --- /dev/null +++ b/training/amygdala_stories/stories/furious.txt @@ -0,0 +1 @@ +I read the text three times before I understood it. He had done it. After every conversation. After the specific conversation where I had said the specific words. He had done it anyway. I stood up too fast and my chair hit the wall. My hands were shaking, which annoyed me further because shaking hands are the hands of somebody too rattled to do anything useful, and I was not rattled, I was something much cleaner than that. I picked up the phone and put it down again because the message I wanted to send would have cost me the last scrap of ground I was standing on. I walked three times around the kitchen trying to get small enough to sit back down. 
diff --git a/training/amygdala_stories/stories/grateful.txt b/training/amygdala_stories/stories/grateful.txt new file mode 100644 index 0000000..4f6d0e3 --- /dev/null +++ b/training/amygdala_stories/stories/grateful.txt @@ -0,0 +1 @@ +She had meant to write the thank-you card for a week and every time she sat down to do it the words got too big. The woman had covered her shift three times — three times! — during the worst month, without being asked, and had also been the one who showed up with soup and didn't stay too long. She didn't know how to make a card small enough to say this without being a whole speech. In the end she wrote just a few lines and then, before she could overthink it, licked the envelope and walked it to the mailbox before the feeling could shrink. diff --git a/training/amygdala_stories/stories/grief_stricken.txt b/training/amygdala_stories/stories/grief_stricken.txt new file mode 100644 index 0000000..174fc1e --- /dev/null +++ b/training/amygdala_stories/stories/grief_stricken.txt @@ -0,0 +1 @@ +She made it through the service. She made it through the reception. She drove herself home because everyone offered and she said no to all of them, and that was a mistake, but she got home. She stood in the kitchen with her keys in her hand and then she couldn't figure out where keys went. She stood there for a long time. The dog sniffed her shoes and wandered off. Eventually she sat down on the kitchen floor and the crying was not the sort you catch your breath from. Her mother had been the one who knew where the keys went. Her mother had known everything where everything went. Now there was just the kitchen floor. diff --git a/training/amygdala_stories/stories/guilty.txt b/training/amygdala_stories/stories/guilty.txt new file mode 100644 index 0000000..e912ed5 --- /dev/null +++ b/training/amygdala_stories/stories/guilty.txt @@ -0,0 +1 @@ +He'd said he was working late. He had not been working late. It was only the second time in twenty years and the reasons had seemed fine in the moment. Now, driving home, every green light felt accusatory. He rehearsed what he would say if she asked, and he hated the rehearsing. When he walked in she smiled and asked how the day had been and he gave her the short version. She didn't question it. That was worse. He went to brush his teeth and stood in the bathroom with the faucet running and could not look at his own reflection. diff --git a/training/amygdala_stories/stories/hope.txt b/training/amygdala_stories/stories/hope.txt new file mode 100644 index 0000000..58264d5 --- /dev/null +++ b/training/amygdala_stories/stories/hope.txt @@ -0,0 +1 @@ +She had not used the word out loud yet, even in her head. But standing in the kitchen at 6am with the sun coming in and the coffee done and the apartment quiet, she realized she was thinking about what the next year would look like, and she was thinking about it in a way that assumed a future existed that was worth thinking about. Which it had not, for a long time. She didn't reach for the word. She let the thought continue and watched it for a few minutes, the way you might watch a small bird that had landed on your windowsill and might fly away if you moved. diff --git a/training/amygdala_stories/stories/hopeful.txt b/training/amygdala_stories/stories/hopeful.txt new file mode 100644 index 0000000..d6136b7 --- /dev/null +++ b/training/amygdala_stories/stories/hopeful.txt @@ -0,0 +1 @@ +The first real scan after six weeks of treatment was scheduled for Thursday. 
He had been trying not to think about it and trying not to not-think about it. On Tuesday evening he caught himself planning the summer. Small things — the dock that needed restaining, the trip to his sister's he'd been putting off. He stopped and noticed he was planning. A part of him wanted to take it back, don't get ahead of yourself. But another part, quieter, newer, said no, let it stay. Let the plan be there. Whether or not anything comes of it, the planning itself is allowed. diff --git a/training/amygdala_stories/stories/horny.txt b/training/amygdala_stories/stories/horny.txt new file mode 100644 index 0000000..0ffdaab --- /dev/null +++ b/training/amygdala_stories/stories/horny.txt @@ -0,0 +1 @@ +She was supposed to be reading the thing her advisor had sent and she was not reading it. Her thighs had been pressed together for about ten minutes. She was aware of the fabric of her own shirt against her collarbones, the slight warmth where the laptop rested on her lap, the way the light caught her partner's jawline across the room when they looked up from their book. They hadn't looked at her that way. She had just noticed the jawline. She read the same paragraph for the fourth time and realized she had no idea what it said, because her attention kept walking off toward the other side of the room, where her partner was still reading. diff --git a/training/amygdala_stories/stories/humble.txt b/training/amygdala_stories/stories/humble.txt new file mode 100644 index 0000000..4348df0 --- /dev/null +++ b/training/amygdala_stories/stories/humble.txt @@ -0,0 +1 @@ +He had been given the award at the end of the ceremony and he had thanked the committee and then, at the reception, he could not bring himself to talk about it. A younger researcher came up and asked him, earnestly, what his secret was, and he said that he had been lucky in his collaborators and his mentors and the specific decade he'd started his career in. He meant this. It was the boring answer and also the true one. He knew what he had done well. He also knew exactly how many pieces had to fall into place for anything to matter, and how many of those pieces were out of his hands. diff --git a/training/amygdala_stories/stories/in_flow.txt b/training/amygdala_stories/stories/in_flow.txt new file mode 100644 index 0000000..a0d525e --- /dev/null +++ b/training/amygdala_stories/stories/in_flow.txt @@ -0,0 +1 @@ +The afternoon disappeared somewhere. She had started around two — had opened the document with a vague sense of what she wanted to say. At some point the sentences had started coming faster than she could type them, and at another point she had paused to reread and found three pages she did not entirely remember writing, and they were good pages. The light in the room had changed. Her coffee was cold and she had forgotten it. She typed the next sentence. The one after that. She was not thinking about being in flow; she was simply in it, and would only notice later, when it broke, how smooth and how strange it had been. diff --git a/training/amygdala_stories/stories/insulted.txt b/training/amygdala_stories/stories/insulted.txt new file mode 100644 index 0000000..e7f18d1 --- /dev/null +++ b/training/amygdala_stories/stories/insulted.txt @@ -0,0 +1 @@ +The comment had been a joke, technically. The kind of joke that uses a compliment as cover. He had laughed along because the rest of the table was laughing and because not laughing would have been the bigger thing. 
But walking to his car afterward he kept returning to the exact phrasing. The smallness of it. The way she had watched him while she said it — she had known what she was doing. He sat in the driver's seat with his hands on the wheel and the engine off and let himself be angry for a minute, so that by the time he got home he wouldn't be. diff --git a/training/amygdala_stories/stories/jealous.txt b/training/amygdala_stories/stories/jealous.txt new file mode 100644 index 0000000..722035a --- /dev/null +++ b/training/amygdala_stories/stories/jealous.txt @@ -0,0 +1 @@ +She had heard him laugh on the phone. The specific laugh, the open one he used to do with her all the time and had not done in a while. The phone had been with somebody else, somebody named Claire, and the laugh had been in response to something Claire said. She had not meant to be listening. Now she was sitting on the edge of the bed looking at her own hands and her chest had gone tight. She did not trust Claire. She trusted him, she was almost sure. But the laugh, that laugh, she had thought that laugh was only for her. diff --git a/training/amygdala_stories/stories/joyful.txt b/training/amygdala_stories/stories/joyful.txt new file mode 100644 index 0000000..452b69b --- /dev/null +++ b/training/amygdala_stories/stories/joyful.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park and I didn't run. Sun through the last drops, the wet smell of cut grass, somebody's kid laughing at a puddle two benches over. I stopped under a tree and watched the water come off the leaves in this slow bright drip. My face kept moving on its own into something between a grin and just — open. I hadn't even known I was tired. I stood there getting rained on from the tree well after the sky had cleared, and when I finally kept walking I was twenty minutes late for nothing and I didn't even mind. diff --git a/training/amygdala_stories/stories/listless.txt b/training/amygdala_stories/stories/listless.txt new file mode 100644 index 0000000..2d22224 --- /dev/null +++ b/training/amygdala_stories/stories/listless.txt @@ -0,0 +1 @@ +It was two in the afternoon and she was still in pajamas. The book was open on her knee but she hadn't turned the page in twenty minutes. She wasn't sad exactly, she just wasn't anything. The idea of showering felt theoretical. The idea of replying to any of the texts felt enormous. She got up to get water and on her way back lay on the couch instead. Outside the window a bird did bird things. She watched it without interest. Eventually the light changed and she realized it was evening and she hadn't moved and the day had happened to somebody else. diff --git a/training/amygdala_stories/stories/lonely.txt b/training/amygdala_stories/stories/lonely.txt new file mode 100644 index 0000000..b8672d7 --- /dev/null +++ b/training/amygdala_stories/stories/lonely.txt @@ -0,0 +1 @@ +Third Saturday in a row. The apartment was fine — clean, warm, a show playing that he wasn't watching. He had messaged three people earlier and none had replied, which was nobody's fault, Saturdays were Saturdays, but the quiet in the apartment had a specific shape. It wasn't peaceful quiet. It was the kind that sounded like everyone else was somewhere else, together. He thought about putting on real clothes and going to a bar alone, and the thought of being at a bar alone was worse than the apartment, so he didn't. He ate leftover rice standing up and told himself he'd go to bed early. 
diff --git a/training/amygdala_stories/stories/longing.txt b/training/amygdala_stories/stories/longing.txt new file mode 100644 index 0000000..506f881 --- /dev/null +++ b/training/amygdala_stories/stories/longing.txt @@ -0,0 +1 @@ +The photo had been taken five years ago and it was the only one she had of the three of them together. She looked at it more than she would admit. Not in sadness, exactly — they were all still alive, just scattered. One in Melbourne. One in Halifax. Her here. The photo was from the summer they'd shared the house, the last time they had all been in one place long enough to have an ordinary afternoon together. She wanted that summer back and also knew that the summer had been made partly by the fact that it was ending. She closed the photo. Opened it again an hour later. diff --git a/training/amygdala_stories/stories/loving.txt b/training/amygdala_stories/stories/loving.txt new file mode 100644 index 0000000..b2b89e9 --- /dev/null +++ b/training/amygdala_stories/stories/loving.txt @@ -0,0 +1 @@ +He watched her sleep for a minute before he had to leave for the early shift. Hair across her face, one hand fisted under her chin like a child. The cat was on the blanket by her feet, judging him. Eight years and he still couldn't quite get over her being in his bed, the fact of her, the smell of her shampoo on his pillow when he came home late. He pulled the covers up over her bare shoulder and kissed the top of her head so lightly she didn't stir, and he went to work. diff --git a/training/amygdala_stories/stories/melty.txt b/training/amygdala_stories/stories/melty.txt new file mode 100644 index 0000000..ac60c0b --- /dev/null +++ b/training/amygdala_stories/stories/melty.txt @@ -0,0 +1 @@ +Whatever the drug was, it was working. She was aware of her skin as a single continuous surface, warm, slightly humming. The couch under her had gone soft in a way that probably wasn't literal. Her partner's hand on her hip felt like it was everywhere. She could hear every rustle in the room, and none of it demanded anything. Time had gone loose — something that felt like five minutes had actually been twenty. She tried to remember what she had been worried about earlier and the worry had the texture of a word she could almost recall. She smiled without deciding to, and slid a little further down into the couch. diff --git a/training/amygdala_stories/stories/nervous.txt b/training/amygdala_stories/stories/nervous.txt new file mode 100644 index 0000000..2c141a0 --- /dev/null +++ b/training/amygdala_stories/stories/nervous.txt @@ -0,0 +1 @@ +Seven minutes until they called her. She was watching the clock instead of her notes, which was stupid. She went back to the notes. The first bullet point was fine. The second bullet point had been fine this morning and now looked wrong. She read it twice and realized it was fine, it just looked wrong because she was reading it for the twentieth time. She drank water from the room-temperature water bottle. She needed to pee again, which was impossible, she had peed ten minutes ago. Her hand went to the back of her neck. Six minutes. diff --git a/training/amygdala_stories/stories/nostalgic.txt b/training/amygdala_stories/stories/nostalgic.txt new file mode 100644 index 0000000..7ce93a4 --- /dev/null +++ b/training/amygdala_stories/stories/nostalgic.txt @@ -0,0 +1 @@ +The song came on in the grocery store of all places. He was standing in the cereal aisle with his phone in his hand and he just — stopped. 
It was a song he hadn't heard in fifteen years and hadn't thought about in longer. Back seat of somebody's car, summer, all of them singing too loud, a girl he'd been quietly in love with reaching over and turning it up. He remembered the specific blue of the dashboard lights. He remembered what she had smelled like. She had gotten married three years ago to somebody else, and he was happy for her, and this was still a different thing, a thing that could exist alongside the first thing without contradicting it. He stood in the aisle until the song ended. diff --git a/training/amygdala_stories/stories/overwhelmed.txt b/training/amygdala_stories/stories/overwhelmed.txt new file mode 100644 index 0000000..138f8c6 --- /dev/null +++ b/training/amygdala_stories/stories/overwhelmed.txt @@ -0,0 +1 @@ +The baby was crying and the toddler had just spilled juice and the email that had come through on her phone was from her boss and she could see it was the "quick question" kind that never was. She had not slept in four hours two nights in a row. She stood in the kitchen with the paper towels in her hand and felt her capacity flatten, just go flat, like a tire with a slow leak. Everything was needed at once. She could not prioritize. She could not even choose which hand to use first. For a second she considered sitting down on the floor and she did not trust that she would get back up, so she didn't. diff --git a/training/amygdala_stories/stories/panicked.txt b/training/amygdala_stories/stories/panicked.txt new file mode 100644 index 0000000..62e108b --- /dev/null +++ b/training/amygdala_stories/stories/panicked.txt @@ -0,0 +1 @@ +She couldn't find the kid. She had looked away for thirty seconds, maybe less, and now the spot where he had been was empty. The playground was full of other people's children. She scanned once, fast, and did not see him. Her body started doing a thing her body did — hot, tight, slightly disconnected — and she was already moving before her mind had caught up. She called his name too loud. A woman turned around. Her voice was not her normal voice. Every second that passed was physically expensive. When she finally saw him, under the slide, pulling the laces of his shoe, she could not for a moment tell if she was going to hug him or yell. diff --git a/training/amygdala_stories/stories/paranoid.txt b/training/amygdala_stories/stories/paranoid.txt new file mode 100644 index 0000000..3604262 --- /dev/null +++ b/training/amygdala_stories/stories/paranoid.txt @@ -0,0 +1 @@ +He'd noticed the blue sedan three times in four days. First the grocery store, then again on the way back from his dentist, then parked two doors down when he pulled into his own driveway. Different license plates each time, which was arguably the point. He kept the phone on the kitchen counter now instead of carrying it. The new neighbors were "from Delaware" but neither of them had a Delaware accent. He'd started checking the basement window each night. He knew how it sounded. But sometimes the simplest explanation wasn't the correct one, and there were patterns he was the only person in a position to see. 
diff --git a/training/amygdala_stories/stories/playful.txt b/training/amygdala_stories/stories/playful.txt new file mode 100644 index 0000000..bfc97f4 --- /dev/null +++ b/training/amygdala_stories/stories/playful.txt @@ -0,0 +1 @@ +I gave the dog the squeaky pig and she went into her little whirl — the one where her whole body goes into it, back end swinging around and around, front end bowing down, squeak squeak squeak, a manic grin. I laughed and tossed her a second squeaky toy just to see what she'd do. She tried to get both in her mouth at once, failed magnificently, dropped one, picked it up, dropped the other, looked up at me with an expression that said WHAT HAS HAPPENED and I was laughing too hard to help. I lay down on the floor and she climbed on me, squeaking. diff --git a/training/amygdala_stories/stories/proud.txt b/training/amygdala_stories/stories/proud.txt new file mode 100644 index 0000000..6dc2055 --- /dev/null +++ b/training/amygdala_stories/stories/proud.txt @@ -0,0 +1 @@ +I finished the patch at four in the morning and got up from the desk and walked once around the apartment before I sent it. Eight months on this bug. Eight months of wrong theories and wasted weekends and one colleague quietly betting me it was unfixable. And here it was — a six-line change, three of which were deleting code. I went back and read the diff one more time. Clean. Obvious in hindsight, the way the hard ones always are in hindsight. I sent it. Then I stood at the kitchen window for a minute with my arms crossed and let myself just have it. diff --git a/training/amygdala_stories/stories/proud_of_another.txt b/training/amygdala_stories/stories/proud_of_another.txt new file mode 100644 index 0000000..3f25912 --- /dev/null +++ b/training/amygdala_stories/stories/proud_of_another.txt @@ -0,0 +1 @@ +She watched her daughter on stage and she couldn't quite control her face. The solo had been at the end of the piece and her daughter had hit it — really hit it, the note that had been giving her trouble for six weeks — and then kept going into the run without bobbling, without flinching. In the audience her mother was dabbing her eyes, taking no pride in keeping them dry. She clapped until her hands stung. When her daughter came out after the concert she hugged her and said "you did that, you did that, you did that," and her daughter was embarrassed and glowing at once, the way kids are when the thing they did was actually good. diff --git a/training/amygdala_stories/stories/relieved.txt b/training/amygdala_stories/stories/relieved.txt new file mode 100644 index 0000000..4869d42 --- /dev/null +++ b/training/amygdala_stories/stories/relieved.txt @@ -0,0 +1 @@ +The nurse came out and said everything had gone well. Simple as that. Everything had gone well. The surgeon was pleased. The recovery would be straightforward. She had been standing up and she sat back down in the waiting room chair and didn't trust her legs for a minute. Her shoulders, which she hadn't realized had been up near her ears for six hours, slowly came down. She laughed, once, at nothing in particular. She texted her sister. She kept reading the nurse's words in her head as if there were some trick to them, and there wasn't, and it took her a while to let it be that simple.
diff --git a/training/amygdala_stories/stories/rigorous.txt b/training/amygdala_stories/stories/rigorous.txt new file mode 100644 index 0000000..b918d30 --- /dev/null +++ b/training/amygdala_stories/stories/rigorous.txt @@ -0,0 +1 @@ +The pull request had three approvals but she opened the diff one more time anyway, reading each function from the top. Not looking for bugs exactly — looking for *this shouldn't be here*. The kind of thing that's easy to scan past because it compiles and passes the tests and looks right. On the fourth file she slowed. There was a branch that handled an edge case with a magic constant. It worked, but she couldn't find the place where the constant came from, and it was subtle enough that none of the reviewers had questioned it. She left a comment asking where the number came from, because the answer mattered even if the code was correct. diff --git a/training/amygdala_stories/stories/rushing.txt b/training/amygdala_stories/stories/rushing.txt new file mode 100644 index 0000000..089195f --- /dev/null +++ b/training/amygdala_stories/stories/rushing.txt @@ -0,0 +1 @@ +The email was already half-written when the next meeting notification chimed. He skimmed the last few lines he'd typed, couldn't quite tell if they landed, hit send anyway. Opened the meeting. Half-listened while triaging the inbox with the other half of his attention. A colleague asked him a question and he answered too quickly and only later realized he'd answered the wrong question entirely. At 4pm, walking to the coffee machine, he realized he couldn't name a single thing he had actually completed that day. Everything had been touched. Nothing had been done. His shoulders were up somewhere near his ears. diff --git a/training/amygdala_stories/stories/saudade.txt b/training/amygdala_stories/stories/saudade.txt new file mode 100644 index 0000000..41d9f7f --- /dev/null +++ b/training/amygdala_stories/stories/saudade.txt @@ -0,0 +1 @@ +He missed a place that he wasn't sure had ever existed in quite the way he remembered it. The summer at his grandmother's house the year he was nine. The shape of the front porch. The smell of the lavender along the driveway. His grandmother's way of saying his name. She had been dead for twenty years and the house had been sold, and he carried the place around with him in a part of his chest that ached when he thought about it, and also the ache was one of the things he loved most about himself. The missing was not something he wanted fixed. It was how he kept her. diff --git a/training/amygdala_stories/stories/schadenfreude.txt b/training/amygdala_stories/stories/schadenfreude.txt new file mode 100644 index 0000000..caca90b --- /dev/null +++ b/training/amygdala_stories/stories/schadenfreude.txt @@ -0,0 +1 @@ +The announcement went up on the company blog at nine in the morning. The smug director — the one who had spent two years making everyone under him miserable while failing upward — was leaving "to pursue other opportunities." Three of them met at the coffee machine and exchanged a single look, and all three of them had to work hard not to grin. Nobody said anything. They didn't have to. Somebody refilled the sugar caddy just to have something to do with their hands. On the walk back to her desk she felt a mean little happiness flicker through her chest and she let it. She had earned this one. 
diff --git a/training/amygdala_stories/stories/sensual.txt b/training/amygdala_stories/stories/sensual.txt new file mode 100644 index 0000000..9d17d75 --- /dev/null +++ b/training/amygdala_stories/stories/sensual.txt @@ -0,0 +1 @@ +The bath water was the perfect temperature and the music in the next room was low and the candles had been lit for no special reason other than it was Tuesday and she was done with everything. She slid down until the water came up to her collarbones and closed her eyes. Her own hand drifted along her thigh, not going anywhere in particular. She could feel every inch of skin the water touched, the small rush of warmth when she shifted, the scent of something vaguely green. Everything slow. She was in no hurry for anything to happen. This was what was happening. diff --git a/training/amygdala_stories/stories/skeptical.txt b/training/amygdala_stories/stories/skeptical.txt new file mode 100644 index 0000000..29413af --- /dev/null +++ b/training/amygdala_stories/stories/skeptical.txt @@ -0,0 +1 @@ +The founder was halfway through his pitch and every slide had a five-times-bigger number than the last one. The market was enormous. The solution was proprietary. The pilot customers, when named, were described as "exploring adoption." She wrote a polite question in her notebook and waited for him to finish. When he opened for questions she asked about retention — just retention — and he gave an answer that was not, strictly speaking, about retention. She wrote that down too. The slides kept projecting numbers. She had already decided. She would listen through the rest of the meeting to be fair, but her decision would be the same at the end as it had been three minutes in. diff --git a/training/amygdala_stories/stories/smug.txt b/training/amygdala_stories/stories/smug.txt new file mode 100644 index 0000000..105b0a3 --- /dev/null +++ b/training/amygdala_stories/stories/smug.txt @@ -0,0 +1 @@ +Richard let them finish arguing before he spoke, which was a move he'd been developing for a few years. He waited until the meeting had tangled itself completely and the director was rubbing her eyes. Then he said the thing he'd been sitting on for twenty minutes, the thing that solved it in one sentence, and he said it slowly. He watched a couple of faces rearrange themselves. He didn't quite smile. He let them come around to thanking him. When Ben said "nice catch" Richard said "oh, I just thought I'd mention it" in a tone that meant he had known, of course he had known, and he picked up his coffee and sipped it. diff --git a/training/amygdala_stories/stories/staying_with.txt b/training/amygdala_stories/stories/staying_with.txt new file mode 100644 index 0000000..f5a4e4c --- /dev/null +++ b/training/amygdala_stories/stories/staying_with.txt @@ -0,0 +1 @@ +The conversation had gone somewhere hard. Neither of them had words for a minute. He didn't try to fix it or make a joke or summarize. He just sat there in the quiet with her, his hand still on her knee where it had been. The impulse to fill the space came up — he could feel it lift his jaw, try to pull a phrase out — and he let it rise and pass without acting on it. The quiet stretched. She took a breath. Eventually she started again, haltingly, with the next thing she needed to say. He was still there. He had been the whole time. 
diff --git a/training/amygdala_stories/stories/stuck_cognitively.txt b/training/amygdala_stories/stories/stuck_cognitively.txt new file mode 100644 index 0000000..58b9d38 --- /dev/null +++ b/training/amygdala_stories/stories/stuck_cognitively.txt @@ -0,0 +1 @@ +Hour three on the same bug. He had eliminated the obvious causes. He had eliminated the non-obvious causes. He had re-read the same fifty lines so many times the words had stopped meaning anything. He stood up and walked around. He came back and the code still made no sense. There was a thing that was happening that should not be happening, and every path he could see to explain it had been ruled out. He was not frustrated yet. Just stuck, in the very specific way a bug makes you stuck, where the world has quietly declared that it is not going to cooperate with any of your current models of it and is waiting for you to think of something you haven't thought of yet. diff --git a/training/amygdala_stories/stories/suspicious.txt b/training/amygdala_stories/stories/suspicious.txt new file mode 100644 index 0000000..6c4ad00 --- /dev/null +++ b/training/amygdala_stories/stories/suspicious.txt @@ -0,0 +1 @@ +The email said "just following up" but the subject line had a tracking hash in it. She'd seen that hash format before — internal ops usually didn't use one. She sat with the draft open for a few minutes, not clicking anything, scrolling back through their earlier thread. The grammar was very slightly off. Nothing she could point at in a way a manager would believe, but the kind of off that a real person wouldn't produce. She closed the email without replying. Then she opened a Slack DM to IT and asked if they could look at the sender headers before she did anything else. diff --git a/training/amygdala_stories/stories/tender.txt b/training/amygdala_stories/stories/tender.txt new file mode 100644 index 0000000..a0fd0a7 --- /dev/null +++ b/training/amygdala_stories/stories/tender.txt @@ -0,0 +1 @@ +The old dog's back legs had been worse this week, and she was gentle with him getting up onto the couch — lifting his rear end the last few inches, her hand under his ribs the way she'd learned didn't hurt him. He sighed as he settled and she pressed her forehead against his and stayed there a minute. His breath was warm on her face. She rubbed his ear, the soft floppy one he liked, with the exact slowness that meant to him what it meant. She was not yet ready to think about the fact that this was a finite number of times. Right now it was just this, his ear, her hand, the afternoon. diff --git a/training/amygdala_stories/stories/terrified.txt b/training/amygdala_stories/stories/terrified.txt new file mode 100644 index 0000000..2cdbd15 --- /dev/null +++ b/training/amygdala_stories/stories/terrified.txt @@ -0,0 +1 @@ +The footsteps stopped outside her door. Not walked past. Stopped. She was aware of her own heartbeat in her ears and of the fact that she was holding her breath and that her breath was loud. She moved her hand, very slowly, toward the phone on the nightstand. In the crack under the door, a shadow. The shadow moved. The doorknob — she watched it — very slowly began to turn. She could not get her body to do anything. The part of her that would normally tell her what to do had gone completely white. 
diff --git a/training/amygdala_stories/stories/thrilled.txt b/training/amygdala_stories/stories/thrilled.txt new file mode 100644 index 0000000..f8f863b --- /dev/null +++ b/training/amygdala_stories/stories/thrilled.txt @@ -0,0 +1 @@ +She read the email standing up. Then read it again. Then called Marcus without sitting down, pacing the kitchen in a tight rectangle, the dog watching her from the doorway. "They took it. They took the paper. Editor's comments are — I can fix those in a week." Her voice was pitched half a step higher than normal and she couldn't seem to slow it down. Marcus was saying congratulations and she was already on the next thought, the next, the next — three years of rejections and then this, this, this, and she realized she'd been in a T-shirt and pajama pants and she wanted to put on real clothes for no reason at all except that it felt like the kind of day that deserved them. diff --git a/training/amygdala_stories/stories/tired.txt b/training/amygdala_stories/stories/tired.txt new file mode 100644 index 0000000..753581d --- /dev/null +++ b/training/amygdala_stories/stories/tired.txt @@ -0,0 +1 @@ +Fifteen hours on, the nurse finally sat down in the break room and couldn't remember if she'd eaten. Her shoes felt like they were made of concrete. The vending machine was out of the thing she wanted and she stared at it for too long before choosing something else she didn't want either. Everything in the hallway sounded like it was coming from the bottom of a pool. She drank the bad coffee. She thought about the drive home and couldn't picture the route in her head for a second, even though she'd driven it a thousand times. She stood up because sitting was going to break her. diff --git a/training/amygdala_stories/stories/triumphant.txt b/training/amygdala_stories/stories/triumphant.txt new file mode 100644 index 0000000..adacdcf --- /dev/null +++ b/training/amygdala_stories/stories/triumphant.txt @@ -0,0 +1 @@ +The server came up clean. After four months. The whole cluster, all sixteen nodes, finally passing the long-running stress test that had been failing in one subtle way or another since January. He stood up from his chair. Walked to the doorway of his office. Looked up and down the empty hallway — everyone else gone for the night. Came back and read the green PASS lines one more time. Then he closed the laptop lid. Softly. And stood there with his hands on the edge of the desk, head down, grinning at the floor, because there was no one to high-five and he had earned every high-five he was not going to get. diff --git a/training/amygdala_stories/stories/trusting.txt b/training/amygdala_stories/stories/trusting.txt new file mode 100644 index 0000000..15a21b7 --- /dev/null +++ b/training/amygdala_stories/stories/trusting.txt @@ -0,0 +1 @@ +She handed him the keys and the codes to the safe and the list of her logins and the instructions for the dog, and she didn't second-guess any of it. He was not a saint. He was a person she had known for fifteen years, and in those fifteen years he had done what he said he would do. When she got on the plane she did not spend the flight worrying. She read her book. She slept. Twice, on landing, she thought to check in and both times decided she didn't need to. He had the keys. The dog was fine. She knew this the way she knew her own hand. 
diff --git a/training/amygdala_stories/stories/weary.txt b/training/amygdala_stories/stories/weary.txt new file mode 100644 index 0000000..9e542c7 --- /dev/null +++ b/training/amygdala_stories/stories/weary.txt @@ -0,0 +1 @@ +It was the fourth week in a row that had required this. Every day ending with a phone call she didn't want to take, and every morning starting with the email from the same person about the same problem. She was getting through it. She wasn't breaking. But something in her had gone quiet in a way that was not peaceful. Her laugh was slower to come. She had stopped suggesting things in meetings, not out of fear, just out of not having the fuel. She looked at her calendar for the week ahead and did not react. There was no reacting left; there was just doing the next thing and the next thing and the next. diff --git a/training/amygdala_stories/stories/witnessed.txt b/training/amygdala_stories/stories/witnessed.txt new file mode 100644 index 0000000..f80a766 --- /dev/null +++ b/training/amygdala_stories/stories/witnessed.txt @@ -0,0 +1 @@ +She told him about the night six years ago, the one she had never told anybody, and her voice was steady but something in her throat was not. He didn't do the thing people do — the reframe, the there-there, the quick comfort — he just kept his eyes on her face and nodded, once, when the hard part landed. When she finished she was quiet for a moment. And then something in her released that she hadn't known was holding. Not because he had fixed anything. Because somebody else now knew the shape, and she wasn't carrying it by herself anymore. The loop that had been open for six years, closed, just from that. diff --git a/training/amygdala_stories/stories/yearning_sexual.txt b/training/amygdala_stories/stories/yearning_sexual.txt new file mode 100644 index 0000000..caed0fd --- /dev/null +++ b/training/amygdala_stories/stories/yearning_sexual.txt @@ -0,0 +1 @@ +She wasn't going to see him for three more weeks. Three weeks had never previously felt like a measurable stretch of time. Now it was an actual distance. She was in the kitchen and there was nothing wrong with the kitchen, and she did not want to be in the kitchen, she wanted the specific weight of his arm across her, and his neck under her mouth, and none of that was available in this kitchen or any of the next twenty kitchens she was going to be in between now and then. She leaned on the counter. She took a long breath. She thought about calling him just to hear his voice and decided that would make it worse. From 34bd122590257ef848a66b3bcde4b263059371bd Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 00:07:58 -0400 Subject: [PATCH 47/94] training: move amygdala training scripts out of vllm plugin The fynnsu-based vllm/plugins/amygdala/ scaffold was superseded by the readout infrastructure landed as vllm commit d3e74edf8500 (vllm/model_executor/layers/readout.py + vllm/v1/worker/readout_manager.py). Training code remained useful so it moved here rather than being deleted. train_steering_vectors.py: CAA diff-of-means trainer that produces the [n_concepts, hidden_size] per-layer projection matrices the runner loads via VLLM_READOUT_VECTORS. extract_training_pairs.py: memory graph -> JSONL converter using per-emotion score thresholds from the subconscious agents' tag lines. 
Co-Authored-By: Proof of Concept
---
 training/amygdala_training/README.md          |  79 ++++++
 training/amygdala_training/__init__.py        |   6 +
 .../extract_training_pairs.py                 | 212 +++++++++++++++
 .../train_steering_vectors.py                 | 248 ++++++++++++++++++
 4 files changed, 545 insertions(+)
 create mode 100644 training/amygdala_training/README.md
 create mode 100644 training/amygdala_training/__init__.py
 create mode 100644 training/amygdala_training/extract_training_pairs.py
 create mode 100644 training/amygdala_training/train_steering_vectors.py

diff --git a/training/amygdala_training/README.md b/training/amygdala_training/README.md
new file mode 100644
index 0000000..b319381
--- /dev/null
+++ b/training/amygdala_training/README.md
@@ -0,0 +1,79 @@
+# Amygdala Readout Vector Training
+
+Training pipeline that produces the safetensors file the vLLM
+ReadoutManager loads at runtime (see
+`vllm/vllm/v1/worker/readout_manager.py`). Produces per-hooked-layer
+`[n_concepts, hidden_size]` projection matrices keyed as
+`layer_<idx>.vectors` — the directions the runner projects residual
+activations onto during each forward pass.
+
+## Overview
+
+Two scripts, run in sequence:
+
+1. **`extract_training_pairs.py`** — turns the memory graph into a
+   directory of (emotion, polarity, text) training examples.
+   Positive examples are memory nodes where the emotion scored
+   ≥ a threshold; negative examples are nodes that don't mention
+   the emotion at all (low-scoring nodes are excluded from both
+   sides). Emotion tags come from the trailing `warmth:9 clarity:10 …`
+   lines the subconscious agents emit.
+
+2. **`train_steering_vectors.py`** — for each emotion, runs the
+   target model over the positive and negative examples, captures
+   residual-stream activations at the configured target layers, and
+   computes `mean(positive) - mean(negative)` as the steering
+   direction. Normalizes per-layer to unit length and saves the
+   whole `[E, L, H]` matrix.
+
+The output file is passed to vLLM via `VLLM_READOUT_VECTORS` together
+with a `VLLM_READOUT_MANIFEST` JSON listing concepts and hooked layer
+indices.
+
+## Method
+
+This is Contrastive Activation Addition (CAA, Rimsky et al.) applied
+to naturally-occurring emotion labels rather than hand-crafted
+contrast pairs. The shape of the signal we're recovering is "what
+direction in the residual stream corresponds to the model processing
+text-with-emotion-E vs. text-without". Because our training data was
+generated by the very model we're instrumenting (past-self's journal
+entries, digest nodes, pattern nodes), the signal should be unusually
+clean — the emotion labels and the text are already causally linked
+through a single model's forward pass.
+
+## Usage (design — not yet runnable)
+
+```
+# Step 1: memory graph → training data
+python -m training.amygdala_training.extract_training_pairs \
+    --memory-mcp-url http://localhost:7777 \
+    --output-dir /tmp/amygdala_training_data \
+    --min-positive-score 8 \
+    --max-negative-mentions 0 \
+    --min-content-chars 40 \
+    --max-examples-per-emotion 500
+
+# Step 2: training data → steering vectors
+python -m training.amygdala_training.train_steering_vectors \
+    --model Qwen/Qwen3.5-27B \
+    --training-data-dir /tmp/amygdala_training_data \
+    --target-layers 3,18,33,36 \
+    --output /path/to/amygdala_vectors.safetensors \
+    --dtype bf16 \
+    --batch-size 4
+```
+
+## Open questions
+
+- **Emotion selection**: enumerating which ~200 emotions to cover.
+  Could be "most-common tags in the graph" (data-driven) or "from
+  core-personality / pattern nodes" (human-curated). Probably both.
+- **Layer selection**: middle-to-late layers (~60–80% of depth)
+  usually hold abstract semantic representations best; experiment
+  with which layers give the cleanest linear separation per emotion.
+- **Cross-talk**: if two emotions are highly co-occurring (warmth +
+  love, frustration + tiredness), their vectors will be close; that's
+  fine as long as we don't pretend they're independent axes.
+- **Generalization**: vectors trained on our memory graph may not
+  generalize to out-of-distribution text. Check by applying them to
+  held-out conversation data and eyeballing the projections.
diff --git a/training/amygdala_training/__init__.py b/training/amygdala_training/__init__.py
new file mode 100644
index 0000000..f68c02f
--- /dev/null
+++ b/training/amygdala_training/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+"""Training utilities for amygdala steering vectors.
+
+See README.md in this directory for overall design.
+"""
diff --git a/training/amygdala_training/extract_training_pairs.py b/training/amygdala_training/extract_training_pairs.py
new file mode 100644
index 0000000..45042f0
--- /dev/null
+++ b/training/amygdala_training/extract_training_pairs.py
@@ -0,0 +1,212 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+"""Extract emotion-labeled training pairs from the PoC memory graph.
+
+Input: a memory graph (via poc-memory CLI or direct sqlite access).
+Output: a directory with one JSONL file per emotion:
+
+    output_dir/
+        warmth.jsonl
+        clarity.jsonl
+        recognition.jsonl
+        ...
+        _manifest.json   # enumerates emotions + counts
+
+Each line of an emotion's JSONL is one labeled example:
+    {"text": "...", "polarity": "positive"|"negative",
+     "source_key": "<node key>", "emotion": "<emotion name>"}
+
+Negative examples are sampled from nodes that DON'T mention the
+emotion at all (not ones that mention it with a low score) — the
+natural contrast is "text with this emotional loading" vs. "text
+without this emotional loading." Low-score nodes are excluded
+from both sides.
+"""
+
+import argparse
+import json
+import os
+import random
+import re
+import subprocess
+from collections import defaultdict
+from typing import Iterator
+
+
+# Emotion tag format: `word:N` where N is 0..10. Matches the trailing
+# `warmth:9 clarity:10 …` lines the subconscious agents emit.
+EMOTION_TAG_RE = re.compile(r"\b([a-z][a-z\-]*[a-z]):(\d+)\b")
+
+
+def _run_poc_memory(args: list[str]) -> str:
+    """Run `poc-memory` and return stdout."""
+    result = subprocess.run(
+        ["poc-memory", *args],
+        check=True,
+        capture_output=True,
+        text=True,
+    )
+    return result.stdout
+
+
+def _iter_all_node_keys() -> Iterator[str]:
+    """Yield every node key in the graph."""
+    out = _run_poc_memory(["query", "*", "|", "select", "key"])
+    for line in out.splitlines():
+        line = line.strip()
+        if line:
+            yield line
+
+
+def _fetch_node_content(key: str) -> str | None:
+    """Load a node's rendered content, or None if unavailable."""
+    try:
+        return _run_poc_memory(["render", key])
+    except subprocess.CalledProcessError:
+        return None
+
+
+def _emotion_scores(content: str) -> dict[str, int]:
+    """Parse trailing `warmth:9 clarity:10 …` style tags.
+
+    Returns the highest score seen for each emotion — multiple
+    tag lines in one node get max'd.
+    """
+    out: dict[str, int] = {}
+    for name, score in EMOTION_TAG_RE.findall(content):
+        try:
+            s = int(score)
+        except ValueError:
+            continue
+        if 0 <= s <= 10:
+            out[name] = max(out.get(name, 0), s)
+    return out
+
+
+def _node_body(content: str, min_chars: int) -> str | None:
+    """Strip frontmatter/headers and return the body text for training."""
+    # Drop the emotion-tag lines themselves so the model doesn't
+    # learn to read the label directly.
+    stripped = EMOTION_TAG_RE.sub("", content)
+    stripped = stripped.strip()
+    if len(stripped) < min_chars:
+        return None
+    return stripped
+
+
+def main() -> None:
+    ap = argparse.ArgumentParser(description=__doc__)
+    ap.add_argument("--output-dir", required=True)
+    ap.add_argument(
+        "--min-positive-score", type=int, default=8,
+        help="Emotion score >= this counts as positive",
+    )
+    ap.add_argument(
+        "--min-content-chars", type=int, default=40,
+        help="Skip nodes shorter than this after stripping tags",
+    )
+    ap.add_argument(
+        "--max-examples-per-emotion", type=int, default=500,
+        help="Cap examples per polarity for balanced training",
+    )
+    ap.add_argument(
+        "--max-negative-pool-multiplier", type=float, default=5.0,
+        help="How many negative candidates to consider per positive",
+    )
+    ap.add_argument("--seed", type=int, default=0)
+    args = ap.parse_args()
+
+    random.seed(args.seed)
+    os.makedirs(args.output_dir, exist_ok=True)
+
+    # First pass: collect every node's (key, body, emotion_scores).
+    print("Pass 1/2: scanning memory graph...")
+    all_nodes: list[tuple[str, str, dict[str, int]]] = []
+    for i, key in enumerate(_iter_all_node_keys()):
+        if i % 500 == 0:
+            print(f"  {i} nodes scanned...")
+        content = _fetch_node_content(key)
+        if content is None:
+            continue
+        scores = _emotion_scores(content)
+        body = _node_body(content, args.min_content_chars)
+        if body is None:
+            continue
+        all_nodes.append((key, body, scores))
+    print(f"  {len(all_nodes)} nodes retained after filters.")
+
+    # Which emotions have enough positive examples to be worth training?
+    emotion_counts: dict[str, int] = defaultdict(int)
+    for _, _, scores in all_nodes:
+        for name, s in scores.items():
+            if s >= args.min_positive_score:
+                emotion_counts[name] += 1
+    emotions = sorted(
+        (e for e, n in emotion_counts.items() if n >= 10),
+        key=lambda e: -emotion_counts[e],
+    )
+    print(f"  {len(emotions)} emotions with >=10 positive examples.")
+
+    # Second pass: per emotion, build positive + negative pools.
+    print("Pass 2/2: assembling per-emotion pools...")
+    manifest: dict[str, dict] = {}
+    for emotion in emotions:
+        positives = [
+            (k, body) for k, body, s in all_nodes
+            if s.get(emotion, 0) >= args.min_positive_score
+        ]
+        # Negative pool: nodes that don't mention this emotion at all.
+ negative_pool = [ + (k, body) for k, body, s in all_nodes if emotion not in s + ] + random.shuffle(positives) + random.shuffle(negative_pool) + positives = positives[: args.max_examples_per_emotion] + n_neg = min( + len(positives), + len(negative_pool), + int(args.max_examples_per_emotion), + ) + negatives = negative_pool[:n_neg] + + if not positives or not negatives: + continue + + out_path = os.path.join(args.output_dir, f"{emotion}.jsonl") + with open(out_path, "w") as f: + for key, body in positives: + f.write(json.dumps({ + "text": body, + "polarity": "positive", + "source_key": key, + "emotion": emotion, + }) + "\n") + for key, body in negatives: + f.write(json.dumps({ + "text": body, + "polarity": "negative", + "source_key": key, + "emotion": emotion, + }) + "\n") + manifest[emotion] = { + "n_positive": len(positives), + "n_negative": len(negatives), + "path": out_path, + } + print(f" {emotion}: {len(positives)} pos / {len(negatives)} neg") + + with open( + os.path.join(args.output_dir, "_manifest.json"), "w" + ) as f: + json.dump({ + "emotions": manifest, + "source_nodes": len(all_nodes), + "min_positive_score": args.min_positive_score, + }, f, indent=2) + + print(f"\nWrote {len(manifest)} emotion files to {args.output_dir}") + print(f"Manifest: {os.path.join(args.output_dir, '_manifest.json')}") + + +if __name__ == "__main__": + main() diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py new file mode 100644 index 0000000..a722298 --- /dev/null +++ b/training/amygdala_training/train_steering_vectors.py @@ -0,0 +1,248 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +"""Train amygdala steering vectors via Contrastive Activation Addition. + +Reads the per-emotion JSONL files produced by extract_training_pairs.py, +runs the target model over each example, captures the residual-stream +hidden state at the configured target layers, and computes +`mean(positive) - mean(negative)` as the steering direction per layer +per emotion. + +Output: a safetensors file matching the format AmygdalaConnector +expects: + + vectors: [n_emotions, n_target_layers, hidden_dim] fp16 + emotion_names: [n_emotions] uint8 + +Pooling: last-token residual-stream per example (CAA convention — +the final token has seen the whole context and is where the model's +"decision" lives). Alternative: mean across all tokens. The LAST +convention is more common for steering vector work. +""" + +import argparse +import gc +import json +import os +from collections import defaultdict +from pathlib import Path + +import safetensors.torch +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + + +def _pool_last(hidden: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor: + """Pick the last non-pad token's hidden state per example. + + hidden: [batch, seq, hidden_dim] + attention_mask: [batch, seq] + returns: [batch, hidden_dim] + """ + # last non-pad token index per row + last_idx = attention_mask.sum(dim=1) - 1 + batch_idx = torch.arange(hidden.size(0), device=hidden.device) + return hidden[batch_idx, last_idx] + + +def _collect_activations( + model, + tokenizer, + texts: list[str], + target_layers: list[int], + device: torch.device, + batch_size: int, + max_length: int, +) -> torch.Tensor: + """Run texts through the model, capture residual stream at target + layers, return [n_texts, n_target_layers, hidden_dim] fp32 on CPU. 
+ """ + # Register hooks on the target layers' outputs. We want the + # residual stream AFTER each layer, which is the output of the + # transformer block (hidden_states[layer_idx+1] in HF land). + captures: dict[int, torch.Tensor] = {} + + def make_hook(idx): + def hook(_mod, _inp, output): + # output is typically (hidden_states, ...) — take the first + hs = output[0] if isinstance(output, tuple) else output + captures[idx] = hs.detach() + return hook + + handles = [] + # Transformers' LlamaModel.layers is a ModuleList; Qwen3.5's + # language_model.model.layers follows the same convention. + # Resolve the layer list by walking common paths. + layers_module = _find_layers_module(model) + for idx in target_layers: + handles.append( + layers_module[idx].register_forward_hook(make_hook(idx)) + ) + + out_rows: list[torch.Tensor] = [] + try: + model.eval() + with torch.no_grad(): + for i in range(0, len(texts), batch_size): + batch = texts[i : i + batch_size] + tok = tokenizer( + batch, + return_tensors="pt", + padding=True, + truncation=True, + max_length=max_length, + ).to(device) + captures.clear() + model(**tok) + + per_layer = [] + for idx in target_layers: + hs = captures[idx] # [batch, seq, hidden] + pooled = _pool_last(hs, tok["attention_mask"]) + per_layer.append(pooled.to(torch.float32).cpu()) + # Stack to [batch, n_layers, hidden_dim] + batched = torch.stack(per_layer, dim=1) + out_rows.append(batched) + + del tok, captures + if (i // batch_size) % 10 == 0: + torch.cuda.empty_cache() + finally: + for h in handles: + h.remove() + + return torch.cat(out_rows, dim=0) # [n_texts, n_layers, hidden] + + +def _find_layers_module(model) -> torch.nn.ModuleList: + """Walk a few likely paths to find the transformer-block list.""" + candidates = [ + "model.layers", + "model.model.layers", + "model.language_model.layers", + "model.language_model.model.layers", + "language_model.model.layers", + "transformer.h", + ] + for path in candidates: + obj = model + ok = True + for part in path.split("."): + if not hasattr(obj, part): + ok = False + break + obj = getattr(obj, part) + if ok and isinstance(obj, torch.nn.ModuleList): + return obj + raise RuntimeError( + f"Couldn't find transformer layer list. Tried: {candidates}" + ) + + +def main() -> None: + ap = argparse.ArgumentParser(description=__doc__) + ap.add_argument("--model", required=True, help="HF model id or path") + ap.add_argument("--training-data-dir", required=True) + ap.add_argument( + "--target-layers", required=True, + help="Comma-separated layer indices, e.g. 3,18,33,36", + ) + ap.add_argument("--output", required=True) + ap.add_argument("--dtype", default="bf16", choices=["bf16", "fp16", "fp32"]) + ap.add_argument("--batch-size", type=int, default=4) + ap.add_argument("--max-length", type=int, default=512) + ap.add_argument("--device", default="cuda:0") + args = ap.parse_args() + + target_layers = [int(x) for x in args.target_layers.split(",")] + dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[ + args.dtype + ] + + print(f"Loading {args.model} ({args.dtype}) on {args.device}...") + tokenizer = AutoTokenizer.from_pretrained(args.model) + if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + model = AutoModelForCausalLM.from_pretrained( + args.model, + torch_dtype=dtype, + device_map=args.device, + low_cpu_mem_usage=True, + ) + hidden_dim = model.config.hidden_size + print(f"Model loaded. 
hidden_dim={hidden_dim}, "
+          f"n_layers={model.config.num_hidden_layers}")
+
+    manifest_path = Path(args.training_data_dir) / "_manifest.json"
+    manifest = json.loads(manifest_path.read_text())
+
+    emotions = sorted(manifest["emotions"].keys())
+    print(f"Training {len(emotions)} emotions: {emotions}")
+
+    n_emotions = len(emotions)
+    n_layers = len(target_layers)
+    vectors = torch.zeros(
+        (n_emotions, n_layers, hidden_dim), dtype=torch.float32
+    )
+    device = torch.device(args.device)
+
+    for e_idx, emotion in enumerate(emotions):
+        path = Path(args.training_data_dir) / f"{emotion}.jsonl"
+        pos_texts, neg_texts = [], []
+        with open(path) as f:
+            for line in f:
+                ex = json.loads(line)
+                if ex["polarity"] == "positive":
+                    pos_texts.append(ex["text"])
+                else:
+                    neg_texts.append(ex["text"])
+        print(f"[{e_idx+1}/{n_emotions}] {emotion}: "
+              f"{len(pos_texts)} pos / {len(neg_texts)} neg")
+
+        pos_acts = _collect_activations(
+            model, tokenizer, pos_texts, target_layers, device,
+            args.batch_size, args.max_length,
+        )
+        neg_acts = _collect_activations(
+            model, tokenizer, neg_texts, target_layers, device,
+            args.batch_size, args.max_length,
+        )
+
+        # Difference of means per layer
+        pos_mean = pos_acts.mean(dim=0)  # [n_layers, hidden]
+        neg_mean = neg_acts.mean(dim=0)
+        diff = pos_mean - neg_mean
+
+        # Normalize per layer so projections are scale-comparable
+        norms = diff.norm(dim=-1, keepdim=True).clamp_min(1e-6)
+        diff = diff / norms
+
+        vectors[e_idx] = diff
+        del pos_acts, neg_acts
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    # Save in AmygdalaConnector format.
+    # emotion_names as padded uint8 tensor
+    names_bytes = [e.encode("utf-8") for e in emotions]
+    max_len = max(len(b) for b in names_bytes)
+    padded = torch.tensor(
+        [list(b.ljust(max_len, b"\x00")) for b in names_bytes],
+        dtype=torch.uint8,
+    )
+
+    os.makedirs(os.path.dirname(os.path.abspath(args.output)), exist_ok=True)
+    safetensors.torch.save_file(
+        {
+            "vectors": vectors.to(torch.float16),
+            "emotion_names": padded,
+            "target_layers": torch.tensor(target_layers, dtype=torch.int32),
+        },
+        args.output,
+    )
+    print(f"\nWrote steering vectors to {args.output}: "
+          f"{n_emotions} emotions x {n_layers} layers x {hidden_dim} dim (fp16)")
+
+
+if __name__ == "__main__":
+    main()

From 34bd122590257ef848a66b3bcde4b263059371bd Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 18 Apr 2026 00:07:58 -0400
Subject: [PATCH 48/94] training: rewrite trainer for readout pipeline + story corpus
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The old script was written for the AmygdalaConnector's expected format
([n_emotions, n_target_layers, hidden_dim] in a single tensor, plus a
JSONL input format from extract_training_pairs.py). Neither matches
our current state: the runtime side is now ReadoutManager loading
per-layer safetensors keyed layer_<idx>.vectors, and the data side is
hand-written prose stories under amygdala_stories/{stories,paired}/.

Changes:

* Input loader reads stories/<emotion>.txt and
  paired/<scenario>/<emotion>.txt directly. Each emotion's positive
  set is {its unpaired story} union {its within-scenario framings};
  its negative set is {all other emotions' positives} union {all
  scenario baselines}.

* Paired scenarios' baseline.txt files become shared negatives
  (scenario-neutral prose that doesn't frame any particular emotion),
  providing anchor points for within-scenario contrasts.
* Output writes readout.safetensors with per-layer tensors keyed
  layer_<idx>.vectors, shape (n_concepts, hidden_size), plus a sidecar
  readout.json manifest with {concepts, layers, hidden_size, dtype}
  that ReadoutManager.from_file consumes directly.

* Dedup: activations are computed once per unique text (an emotion's
  own positive is another emotion's negative — we'd otherwise do N×
  the forwards needed).

Preserved:

* _pool_last (last non-pad residual) — matches how readout is read at
  decode time from the sampler's query-last position.
* register_forward_hook on target layer modules — correct approach
  for transformer blocks.
* _find_layers_module traversal — mirrors ReadoutManager's.
* bf16 + low_cpu_mem_usage model load — sensible for 27B on B200.

Verified locally (CPU, fake activations):

* Loader finds 89 emotions from the current corpus (80 unpaired + 9
  emotions that appear only in paired scenarios) and 6 baselines.
* Per-(layer, concept) vectors are unit-normalized.
* Output reloads cleanly through ReadoutManager.from_file with
  matching concepts / layers / shapes.

Co-Authored-By: Proof of Concept
---
 .../train_steering_vectors.py                | 427 +++++++++++-------
 1 file changed, 276 insertions(+), 151 deletions(-)

diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py
index a722298..21e5ed1 100644
--- a/training/amygdala_training/train_steering_vectors.py
+++ b/training/amygdala_training/train_steering_vectors.py
@@ -1,30 +1,48 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-"""Train amygdala steering vectors via Contrastive Activation Addition.
+"""Train concept-readout vectors via Contrastive Activation Addition.
 
-Reads the per-emotion JSONL files produced by extract_training_pairs.py,
-runs the target model over each example, captures the residual-stream
-hidden state at the configured target layers, and computes
-`mean(positive) - mean(negative)` as the steering direction per layer
-per emotion.
+Reads the hand-written story corpus at
+``amygdala_stories/{stories,paired}/`` and produces the per-layer
+safetensors file + sidecar JSON manifest that vLLM's ReadoutManager
+loads at startup (``VLLM_READOUT_VECTORS`` / ``VLLM_READOUT_MANIFEST``).
 
-Output: a safetensors file matching the format AmygdalaConnector
-expects:
+Training data (cross-concept contrast):
 
-    vectors: [n_emotions, n_target_layers, hidden_dim] fp16
-    emotion_names: [n_emotions] uint8
+    positive for emotion E:
+        stories/E.txt
+        paired/<scenario>/E.txt       (for each scenario that covers E)
 
-Pooling: last-token residual-stream per example (CAA convention —
-the final token has seen the whole context and is where the model's
-"decision" lives). Alternative: mean across all tokens. The LAST
-convention is more common for steering vector work.
+    negative for emotion E:
+        stories/<other emotion>.txt
+        paired/<scenario>/baseline.txt   (for each scenario)
+
+Within-scenario paired stories are the highest-signal pairs (same
+content, different concept framing); unpaired stories provide bulk
+contrast across the 80 emotions we have written so far.
+
+Pooling: last non-pad token. Matches how readout is consumed at decode
+time (residual read at the sampler's query position).
+
+Output:
+
+    readout.safetensors
+        layer_<idx>.vectors : fp16 (n_concepts, hidden_size) one per layer
+    readout.json
+        {
+          "concepts": [...],
+          "layers": [...],
+          "hidden_size": int,
+          "dtype": "float16"
+        }
 """
 
+from __future__ import annotations
+
 import argparse
 import gc
 import json
 import os
-from collections import defaultdict
 from pathlib import Path
 
 import safetensors.torch
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
@@ -39,81 +57,11 @@ def _pool_last(hidden: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tens
     attention_mask: [batch, seq]
     returns: [batch, hidden_dim]
     """
-    # last non-pad token index per row
     last_idx = attention_mask.sum(dim=1) - 1
     batch_idx = torch.arange(hidden.size(0), device=hidden.device)
     return hidden[batch_idx, last_idx]
 
 
-def _collect_activations(
-    model,
-    tokenizer,
-    texts: list[str],
-    target_layers: list[int],
-    device: torch.device,
-    batch_size: int,
-    max_length: int,
-) -> torch.Tensor:
-    """Run texts through the model, capture residual stream at target
-    layers, return [n_texts, n_target_layers, hidden_dim] fp32 on CPU.
-    """
-    # Register hooks on the target layers' outputs. We want the
-    # residual stream AFTER each layer, which is the output of the
-    # transformer block (hidden_states[layer_idx+1] in HF land).
-    captures: dict[int, torch.Tensor] = {}
-
-    def make_hook(idx):
-        def hook(_mod, _inp, output):
-            # output is typically (hidden_states, ...) — take the first
-            hs = output[0] if isinstance(output, tuple) else output
-            captures[idx] = hs.detach()
-        return hook
-
-    handles = []
-    # Transformers' LlamaModel.layers is a ModuleList; Qwen3.5's
-    # language_model.model.layers follows the same convention.
-    # Resolve the layer list by walking common paths.
-    layers_module = _find_layers_module(model)
-    for idx in target_layers:
-        handles.append(
-            layers_module[idx].register_forward_hook(make_hook(idx))
-        )
-
-    out_rows: list[torch.Tensor] = []
-    try:
-        model.eval()
-        with torch.no_grad():
-            for i in range(0, len(texts), batch_size):
-                batch = texts[i : i + batch_size]
-                tok = tokenizer(
-                    batch,
-                    return_tensors="pt",
-                    padding=True,
-                    truncation=True,
-                    max_length=max_length,
-                ).to(device)
-                captures.clear()
-                model(**tok)
-
-                per_layer = []
-                for idx in target_layers:
-                    hs = captures[idx]  # [batch, seq, hidden]
-                    pooled = _pool_last(hs, tok["attention_mask"])
-                    per_layer.append(pooled.to(torch.float32).cpu())
-                # Stack to [batch, n_layers, hidden_dim]
-                batched = torch.stack(per_layer, dim=1)
-                out_rows.append(batched)
-
-                del tok, captures
-                if (i // batch_size) % 10 == 0:
-                    torch.cuda.empty_cache()
-    finally:
-        for h in handles:
-            h.remove()
-
-    return torch.cat(out_rows, dim=0)  # [n_texts, n_layers, hidden]
-
-
 def _find_layers_module(model) -> torch.nn.ModuleList:
     """Walk a few likely paths to find the transformer-block list."""
     candidates = [
@@ -139,25 +87,143 @@ def _find_layers_module(model) -> torch.nn.ModuleList:
     )
 
 
+def _collect_activations(
+    model,
+    tokenizer,
+    texts: list[str],
+    target_layers: list[int],
+    device: torch.device,
+    batch_size: int,
+    max_length: int,
+) -> torch.Tensor:
+    """Run texts through the model, capture residual stream at target
+    layers, return ``[n_texts, n_target_layers, hidden_dim]`` fp32 on CPU.
+ """ + captures: dict[int, torch.Tensor] = {} + + def make_hook(idx: int): + def hook(_mod, _inp, output): + hs = output[0] if isinstance(output, tuple) else output + captures[idx] = hs.detach() + return hook + + layers_module = _find_layers_module(model) + handles = [ + layers_module[idx].register_forward_hook(make_hook(idx)) + for idx in target_layers + ] + + out_rows: list[torch.Tensor] = [] + try: + model.eval() + with torch.no_grad(): + for i in range(0, len(texts), batch_size): + batch = texts[i : i + batch_size] + tok = tokenizer( + batch, + return_tensors="pt", + padding=True, + truncation=True, + max_length=max_length, + ).to(device) + captures.clear() + model(**tok) + + per_layer = [ + _pool_last(captures[idx], tok["attention_mask"]) + .to(torch.float32) + .cpu() + for idx in target_layers + ] + out_rows.append(torch.stack(per_layer, dim=1)) + del tok, captures + if (i // batch_size) % 10 == 0: + torch.cuda.empty_cache() + captures = {} + finally: + for h in handles: + h.remove() + + return torch.cat(out_rows, dim=0) + + +def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ + dict[str, list[str]], # emotion -> positive texts (unpaired + within-scenario framings) + list[str], # all baseline texts (one per scenario), as scenario-agnostic negatives +]: + """Return ``(positives_by_emotion, baselines)``. + + Cross-concept negatives are computed at training time from + ``positives_by_emotion`` — each emotion's negative set is the + union of all other emotions' positives plus the baseline texts. + """ + positives: dict[str, list[str]] = {} + for story_path in sorted(stories_dir.glob("*.txt")): + emotion = story_path.stem + positives.setdefault(emotion, []).append( + story_path.read_text().strip() + ) + + baselines: list[str] = [] + if paired_dir is not None and paired_dir.exists(): + for scenario_dir in sorted(paired_dir.iterdir()): + if not scenario_dir.is_dir(): + continue + baseline_path = scenario_dir / "baseline.txt" + if baseline_path.exists(): + baselines.append(baseline_path.read_text().strip()) + for framing_path in sorted(scenario_dir.glob("*.txt")): + if framing_path.stem == "baseline": + continue + emotion = framing_path.stem + positives.setdefault(emotion, []).append( + framing_path.read_text().strip() + ) + + return positives, baselines + + def main() -> None: ap = argparse.ArgumentParser(description=__doc__) ap.add_argument("--model", required=True, help="HF model id or path") - ap.add_argument("--training-data-dir", required=True) ap.add_argument( - "--target-layers", required=True, - help="Comma-separated layer indices, e.g. 3,18,33,36", + "--stories-dir", + required=True, + help="Path to amygdala_stories/stories/", + ) + ap.add_argument( + "--paired-dir", + default=None, + help="Path to amygdala_stories/paired/ (optional)", + ) + ap.add_argument( + "--target-layers", + required=True, + help="Comma-separated layer indices, e.g. 
40,50,60,70", + ) + ap.add_argument( + "--output-dir", + required=True, + help="Directory to write readout.safetensors + readout.json", ) - ap.add_argument("--output", required=True) ap.add_argument("--dtype", default="bf16", choices=["bf16", "fp16", "fp32"]) - ap.add_argument("--batch-size", type=int, default=4) + ap.add_argument("--batch-size", type=int, default=2) ap.add_argument("--max-length", type=int, default=512) ap.add_argument("--device", default="cuda:0") + ap.add_argument( + "--min-positives", + type=int, + default=1, + help="Skip emotions with fewer positive examples than this", + ) args = ap.parse_args() target_layers = [int(x) for x in args.target_layers.split(",")] - dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[ - args.dtype - ] + dtype = { + "bf16": torch.bfloat16, + "fp16": torch.float16, + "fp32": torch.float32, + }[args.dtype] print(f"Loading {args.model} ({args.dtype}) on {args.device}...") tokenizer = AutoTokenizer.from_pretrained(args.model) @@ -170,78 +236,137 @@ def main() -> None: low_cpu_mem_usage=True, ) hidden_dim = model.config.hidden_size - print(f"Model loaded. hidden_dim={hidden_dim}, " - f"n_layers={model.config.num_hidden_layers}") - - manifest_path = Path(args.training_data_dir) / "_manifest.json" - manifest = json.loads(manifest_path.read_text()) - - emotions = sorted(manifest["emotions"].keys()) - print(f"Training {len(emotions)} emotions: {emotions}") - - n_emotions = len(emotions) - n_layers = len(target_layers) - vectors = torch.zeros( - (n_emotions, n_layers, hidden_dim), dtype=torch.float32 + n_model_layers = model.config.num_hidden_layers + print( + f"Model loaded. hidden_dim={hidden_dim}, " + f"n_model_layers={n_model_layers}" ) + + for layer_idx in target_layers: + if layer_idx < 0 or layer_idx >= n_model_layers: + raise ValueError( + f"target layer {layer_idx} out of range " + f"[0, {n_model_layers})" + ) + + positives_by_emotion, baselines = _load_corpus( + Path(args.stories_dir), + Path(args.paired_dir) if args.paired_dir else None, + ) + emotions = sorted( + e for e, ps in positives_by_emotion.items() + if len(ps) >= args.min_positives + ) + if not emotions: + raise RuntimeError( + f"No emotions with >= {args.min_positives} positive examples" + ) + print( + f"Training {len(emotions)} emotions; " + f"{len(baselines)} baseline scenarios" + ) + + # Cache all positive-text activations once so we can reuse them as + # negatives for other emotions. Keyed by the text itself to dedup + # across emotion lists. device = torch.device(args.device) + text_to_emotion: dict[str, str] = {} + for emotion, texts in positives_by_emotion.items(): + for t in texts: + text_to_emotion[t] = emotion + + unique_positive_texts = list(text_to_emotion.keys()) + print( + f"Collecting activations for {len(unique_positive_texts)} unique " + f"positive texts + {len(baselines)} baselines..." + ) + + positive_acts = _collect_activations( + model, tokenizer, unique_positive_texts, target_layers, device, + args.batch_size, args.max_length, + ) + # positive_acts[i] corresponds to unique_positive_texts[i] + text_to_row = {t: i for i, t in enumerate(unique_positive_texts)} + + baseline_acts = ( + _collect_activations( + model, tokenizer, baselines, target_layers, device, + args.batch_size, args.max_length, + ) + if baselines + else torch.zeros(0, len(target_layers), hidden_dim) + ) + + n_concepts = len(emotions) + n_layers = len(target_layers) + + # Per-layer output matrices. Shape (n_concepts, hidden_size) each. 
+ per_layer_vectors = torch.zeros( + (n_layers, n_concepts, hidden_dim), dtype=torch.float32 + ) for e_idx, emotion in enumerate(emotions): - path = Path(args.training_data_dir) / f"{emotion}.jsonl" - pos_texts, neg_texts = [], [] - with open(path) as f: - for line in f: - ex = json.loads(line) - if ex["polarity"] == "positive": - pos_texts.append(ex["text"]) - else: - neg_texts.append(ex["text"]) - print(f"[{e_idx+1}/{n_emotions}] {emotion}: " - f"{len(pos_texts)} pos / {len(neg_texts)} neg") + pos_rows = [text_to_row[t] for t in positives_by_emotion[emotion]] + # Negatives: every OTHER emotion's positives + baselines. + neg_rows = [ + i + for i, t in enumerate(unique_positive_texts) + if text_to_emotion[t] != emotion + ] - pos_acts = _collect_activations( - model, tokenizer, pos_texts, target_layers, device, - args.batch_size, args.max_length, - ) - neg_acts = _collect_activations( - model, tokenizer, neg_texts, target_layers, device, - args.batch_size, args.max_length, - ) + pos = positive_acts[pos_rows] # [n_pos, n_layers, hidden] + neg = positive_acts[neg_rows] # [n_neg, n_layers, hidden] + if baseline_acts.shape[0] > 0: + neg = torch.cat([neg, baseline_acts], dim=0) - # Difference of means per layer - pos_mean = pos_acts.mean(dim=0) # [n_layers, hidden] - neg_mean = neg_acts.mean(dim=0) + pos_mean = pos.mean(dim=0) # [n_layers, hidden] + neg_mean = neg.mean(dim=0) diff = pos_mean - neg_mean - - # Normalize per layer so projections are scale-comparable norms = diff.norm(dim=-1, keepdim=True).clamp_min(1e-6) diff = diff / norms - vectors[e_idx] = diff - del pos_acts, neg_acts - gc.collect() - torch.cuda.empty_cache() + # diff[layer] -> per_layer_vectors[layer, e_idx] + for l_idx in range(n_layers): + per_layer_vectors[l_idx, e_idx] = diff[l_idx] - # Save in AmygdalaConnector format. 
- # emotion_names as padded uint8 tensor - names_bytes = [e.encode("utf-8") for e in emotions] - max_len = max(len(b) for b in names_bytes) - padded = torch.tensor( - [list(b.ljust(max_len, b"\x00")) for b in names_bytes], - dtype=torch.uint8, - ) + if e_idx < 5 or e_idx == len(emotions) - 1: + print( + f" [{e_idx + 1}/{len(emotions)}] {emotion}: " + f"pos={len(pos_rows)} neg={len(neg_rows) + baseline_acts.shape[0]}" + ) - os.makedirs(os.path.dirname(os.path.abspath(args.output)), exist_ok=True) + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + tensors = { + f"layer_{target_layers[l_idx]}.vectors": ( + per_layer_vectors[l_idx].to(torch.float16) + ) + for l_idx in range(n_layers) + } safetensors.torch.save_file( - { - "vectors": vectors.to(torch.float16), - "emotion_names": padded, - "target_layers": torch.tensor(target_layers, dtype=torch.int32), - }, - args.output, + tensors, + str(output_dir / "readout.safetensors"), ) - print(f"\nWrote steering vectors to {args.output}: " - f"{n_emotions} emotions x {n_layers} layers x {hidden_dim} dim (fp16)") + manifest = { + "concepts": emotions, + "layers": target_layers, + "hidden_size": hidden_dim, + "dtype": "float16", + } + (output_dir / "readout.json").write_text( + json.dumps(manifest, indent=2) + "\n" + ) + + total_mb = sum(t.numel() * 2 for t in tensors.values()) / (1024 * 1024) + print( + f"\nWrote readout.safetensors + readout.json to {output_dir}\n" + f" {n_concepts} concepts x {n_layers} layers x " + f"{hidden_dim} dim (fp16), total {total_mb:.1f} MiB" + ) + del model + gc.collect() + torch.cuda.empty_cache() if __name__ == "__main__": From 047da10123c6602c4d915307ec2eda92e5bc4d25 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 00:40:32 -0400 Subject: [PATCH 49/94] training: add preflight checks + progress logging to trainer Review pass before running on b200. 27B model + 100+ story corpus means any misconfiguration costs real time; better to fail before model load and give visible progress during forwards. * Pre-load-model validation: stories-dir and paired-dir exist, corpus has >= min_positives emotions. * Per-batch progress log every 5 batches with elapsed + ETA. * Relative depth printed for target layers (e.g. "layer 40 (51%)"). * Skip empty .txt files with a warning rather than feeding the tokenizer an empty string. * Assert non-empty strings in _collect_activations. Co-Authored-By: Proof of Concept --- .../train_steering_vectors.py | 109 +++++++++++++++--- 1 file changed, 95 insertions(+), 14 deletions(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 21e5ed1..d06a35a 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -95,10 +95,18 @@ def _collect_activations( device: torch.device, batch_size: int, max_length: int, + *, + label: str = "", ) -> torch.Tensor: """Run texts through the model, capture residual stream at target layers, return ``[n_texts, n_target_layers, hidden_dim]`` fp32 on CPU. 
""" + import time + + assert all(isinstance(t, str) and t for t in texts), ( + f"_collect_activations: empty or non-string text in {label!r}" + ) + captures: dict[int, torch.Tensor] = {} def make_hook(idx: int): @@ -114,10 +122,12 @@ def _collect_activations( ] out_rows: list[torch.Tensor] = [] + n_batches = (len(texts) + batch_size - 1) // batch_size + start = time.time() try: model.eval() with torch.no_grad(): - for i in range(0, len(texts), batch_size): + for b_idx, i in enumerate(range(0, len(texts), batch_size)): batch = texts[i : i + batch_size] tok = tokenizer( batch, @@ -137,8 +147,17 @@ def _collect_activations( ] out_rows.append(torch.stack(per_layer, dim=1)) del tok, captures - if (i // batch_size) % 10 == 0: + if b_idx % 10 == 0: torch.cuda.empty_cache() + if b_idx % 5 == 0 or b_idx == n_batches - 1: + elapsed = time.time() - start + rate = (b_idx + 1) / elapsed if elapsed > 0 else 0 + eta = (n_batches - b_idx - 1) / rate if rate > 0 else 0 + print( + f" [{label}] batch {b_idx + 1}/{n_batches} " + f"({elapsed:.0f}s elapsed, ~{eta:.0f}s remaining)", + flush=True, + ) captures = {} finally: for h in handles: @@ -156,13 +175,24 @@ def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ Cross-concept negatives are computed at training time from ``positives_by_emotion`` — each emotion's negative set is the union of all other emotions' positives plus the baseline texts. + Empty .txt files are skipped with a warning. """ + def _read_nonempty(path: Path) -> str | None: + text = path.read_text().strip() + if not text: + print( + f" WARN: skipping empty story file {path.relative_to(path.parents[1]) if len(path.parents) >= 2 else path}" + ) + return None + return text + positives: dict[str, list[str]] = {} for story_path in sorted(stories_dir.glob("*.txt")): + text = _read_nonempty(story_path) + if text is None: + continue emotion = story_path.stem - positives.setdefault(emotion, []).append( - story_path.read_text().strip() - ) + positives.setdefault(emotion, []).append(text) baselines: list[str] = [] if paired_dir is not None and paired_dir.exists(): @@ -171,14 +201,17 @@ def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ continue baseline_path = scenario_dir / "baseline.txt" if baseline_path.exists(): - baselines.append(baseline_path.read_text().strip()) + text = _read_nonempty(baseline_path) + if text is not None: + baselines.append(text) for framing_path in sorted(scenario_dir.glob("*.txt")): if framing_path.stem == "baseline": continue + text = _read_nonempty(framing_path) + if text is None: + continue emotion = framing_path.stem - positives.setdefault(emotion, []).append( - framing_path.read_text().strip() - ) + positives.setdefault(emotion, []).append(text) return positives, baselines @@ -225,6 +258,38 @@ def main() -> None: "fp32": torch.float32, }[args.dtype] + # Preflight: corpus dirs exist before we pay the cost of loading a 27B model + stories_dir = Path(args.stories_dir) + if not stories_dir.is_dir(): + raise FileNotFoundError( + f"--stories-dir {stories_dir!s} does not exist or is not a dir" + ) + if args.paired_dir is not None: + pd = Path(args.paired_dir) + if not pd.is_dir(): + raise FileNotFoundError( + f"--paired-dir {pd!s} does not exist or is not a dir" + ) + + # Quick corpus pre-scan so failures show up before we load the model. 
+ positives_preview, baselines_preview = _load_corpus( + stories_dir, + Path(args.paired_dir) if args.paired_dir else None, + ) + n_emotions_preview = sum( + 1 for ps in positives_preview.values() + if len(ps) >= args.min_positives + ) + if n_emotions_preview == 0: + raise RuntimeError( + f"corpus has 0 emotions with >= {args.min_positives} positive " + f"examples. Check {stories_dir} — is it the right directory?" + ) + print( + f"Corpus preflight: {n_emotions_preview} emotions (min_positives=" + f"{args.min_positives}), {len(baselines_preview)} baselines" + ) + print(f"Loading {args.model} ({args.dtype}) on {args.device}...") tokenizer = AutoTokenizer.from_pretrained(args.model) if tokenizer.pad_token_id is None: @@ -235,11 +300,20 @@ def main() -> None: device_map=args.device, low_cpu_mem_usage=True, ) - hidden_dim = model.config.hidden_size - n_model_layers = model.config.num_hidden_layers + # Multimodal configs (Qwen3.5-27B, etc.) nest the text-model + # dimensions under a text_config subobject. get_text_config() + # returns that sub-config when present, else the top-level config. + text_config = ( + model.config.get_text_config() + if hasattr(model.config, "get_text_config") + else model.config + ) + hidden_dim = text_config.hidden_size + n_model_layers = text_config.num_hidden_layers print( f"Model loaded. hidden_dim={hidden_dim}, " - f"n_model_layers={n_model_layers}" + f"n_model_layers={n_model_layers} " + f"(text_config.model_type={getattr(text_config, 'model_type', '?')})" ) for layer_idx in target_layers: @@ -248,6 +322,13 @@ def main() -> None: f"target layer {layer_idx} out of range " f"[0, {n_model_layers})" ) + print( + "Target layers (relative depth): " + + ", ".join( + f"{l} ({100 * l / (n_model_layers - 1):.0f}%)" + for l in target_layers + ) + ) positives_by_emotion, baselines = _load_corpus( Path(args.stories_dir), @@ -283,7 +364,7 @@ def main() -> None: positive_acts = _collect_activations( model, tokenizer, unique_positive_texts, target_layers, device, - args.batch_size, args.max_length, + args.batch_size, args.max_length, label="positives", ) # positive_acts[i] corresponds to unique_positive_texts[i] text_to_row = {t: i for i, t in enumerate(unique_positive_texts)} @@ -291,7 +372,7 @@ def main() -> None: baseline_acts = ( _collect_activations( model, tokenizer, baselines, target_layers, device, - args.batch_size, args.max_length, + args.batch_size, args.max_length, label="baselines", ) if baselines else torch.zeros(0, len(target_layers), hidden_dim) From 0f1c4cf1dee6510e5348906d6af9f7c0fb3289de Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 01:15:46 -0400 Subject: [PATCH 50/94] agent/api: carry readout alongside streamed tokens MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit StreamToken::Token is now a struct variant with an optional TokenReadout (shape [n_layers][n_concepts]) per token — parsed from the vLLM completion response's choices[i].readout field when the server has readout enabled. ApiClient gains a fetch_readout_manifest() method that hits GET /v1/readout/manifest. Returns Ok(None) on 404 (server has readout disabled), so callers can gracefully fall back when pointed at a non-readout-enabled endpoint. 
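For anyone consuming the stream outside this client, the pairing logic
is simple. A hedged Python sketch (`chunk` is one parsed streaming
response payload, `manifest` the cached manifest body; the variable
names are illustrative, not part of the API):

    # choices[i].readout is [num_tokens][n_layers][n_concepts];
    # rows pair with token_ids by index.
    for choice in chunk["choices"]:
        for tok_id, rows in zip(choice["token_ids"],
                                choice.get("readout") or []):
            for layer, values in zip(manifest["layers"], rows):
                named = dict(zip(manifest["concepts"], values))
                print(tok_id, layer, max(named, key=lambda c: abs(named[c])))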
Co-Authored-By: Proof of Concept
---
 src/agent/api/mod.rs         | 82 +++++++++++++++++++++++++++++++++---
 src/agent/context.rs         |  2 +-
 src/subconscious/generate.rs |  2 +-
 3 files changed, 79 insertions(+), 7 deletions(-)

diff --git a/src/agent/api/mod.rs b/src/agent/api/mod.rs
index 8c03bd4..be5e58e 100644
--- a/src/agent/api/mod.rs
+++ b/src/agent/api/mod.rs
@@ -22,6 +22,21 @@ pub struct Usage {
     pub total_tokens: u32,
 }
 
+/// Concept-readout manifest returned by the vLLM server's
+/// `/v1/readout/manifest` endpoint. Maps the nameless tensor indices
+/// in streaming `readout` fields back to concept names and layer
+/// indices.
+#[derive(Debug, Clone, Deserialize)]
+pub struct ReadoutManifest {
+    pub concepts: Vec<String>,
+    pub layers: Vec<usize>,
+}
+
+/// Per-token per-layer concept projections streamed alongside each
+/// sampled token. Shape `[n_layers][n_concepts]`. Named values come
+/// from pairing with the manifest fetched at startup.
+pub type TokenReadout = Vec<Vec<f32>>;
+
 /// A JoinHandle that aborts its task when dropped.
 pub(crate) struct AbortOnDrop(tokio::task::JoinHandle<()>);
 
@@ -45,7 +60,10 @@ pub(crate) struct SamplingParams {
 
 /// One token from the streaming completions API.
 pub enum StreamToken {
-    Token(u32),
+    /// A sampled token, optionally with its per-layer concept readout.
+    /// `readout` is `None` when the server has readout disabled or
+    /// returned no readout for this chunk.
+    Token { id: u32, readout: Option<TokenReadout> },
     Done { usage: Option<Usage> },
     Error(String),
 }
@@ -106,6 +124,32 @@ impl ApiClient {
     pub fn base_url(&self) -> &str { &self.base_url }
     pub fn api_key(&self) -> &str { &self.api_key }
 
+    /// Fetch `/v1/readout/manifest` — returns `Ok(Some(..))` if
+    /// readout is enabled on the server, `Ok(None)` on 404 (disabled),
+    /// or an error on any other failure.
+    ///
+    /// Call once at startup and cache the result; the manifest doesn't
+    /// change during a server run.
+    pub async fn fetch_readout_manifest(&self) -> Result<Option<ReadoutManifest>> {
+        let url = format!("{}/readout/manifest", self.base_url);
+        let auth = format!("Bearer {}", self.api_key);
+        let response = self
+            .client
+            .get_with_headers(&url, &[("Authorization", &auth)])
+            .await
+            .map_err(|e| anyhow::anyhow!("readout manifest fetch ({}): {}", url, e))?;
+        let status = response.status();
+        if status.as_u16() == 404 {
+            return Ok(None);
+        }
+        if !status.is_success() {
+            let body = response.text().await.unwrap_or_default();
+            let n = body.floor_char_boundary(body.len().min(500));
+            anyhow::bail!("readout manifest HTTP {} ({}): {}", status, url, &body[..n]);
+        }
+        Ok(Some(response.json().await?))
+    }
+
 }
 
 async fn stream_completions(
@@ -172,17 +216,45 @@ async fn stream_completions(
         };
 
         for choice in choices {
+            // `readout`, if present, is a nested list
+            // `[num_tokens][n_layers][n_concepts]`. Parse it once per
+            // chunk and pair rows with token ids by index — the rows
+            // are in the same order as `token_ids`.
+            let readouts: Option<Vec<TokenReadout>> = choice["readout"]
+                .as_array()
+                .map(|outer| {
+                    outer.iter().filter_map(|per_token| {
+                        per_token.as_array().map(|layers| {
+                            layers.iter().filter_map(|per_layer| {
+                                per_layer.as_array().map(|vals| {
+                                    vals.iter()
+                                        .filter_map(|v| v.as_f64().map(|f| f as f32))
+                                        .collect::<Vec<f32>>()
+                                })
+                            }).collect::<Vec<Vec<f32>>>()
+                        })
+                    }).collect()
+                });
+
             if let Some(ids) = choice["token_ids"].as_array() {
-                for id_val in ids {
+                for (i, id_val) in ids.iter().enumerate() {
                     if let Some(id) = id_val.as_u64() {
-                        let _ = tx.send(StreamToken::Token(id as u32));
+                        let readout = readouts
+                            .as_ref()
+                            .and_then(|r| r.get(i).cloned());
+                        let _ = tx.send(StreamToken::Token {
+                            id: id as u32,
+                            readout,
+                        });
                     }
                 }
             } else if let Some(text) = choice["text"].as_str() {
-                // Fallback: provider didn't return token_ids, encode locally
+                // Fallback: provider didn't return token_ids, encode locally.
+                // No readout available in this path — the encoder may
+                // produce a different token count than the server did.
                 if !text.is_empty() {
                     for id in super::tokenizer::encode(text) {
-                        let _ = tx.send(StreamToken::Token(id));
+                        let _ = tx.send(StreamToken::Token { id, readout: None });
                     }
                 }
             }
diff --git a/src/agent/context.rs b/src/agent/context.rs
index 948e9f2..49b9998 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -682,7 +682,7 @@ impl ResponseParser {
         let mut full_text = String::new();
         while let Some(event) = stream.recv().await {
             match event {
-                super::api::StreamToken::Token(id) => {
+                super::api::StreamToken::Token { id, readout: _ } => {
                     let text = super::tokenizer::decode(&[id]);
                     full_text.push_str(&text);
                     let mut ctx = agent.context.lock().await;
diff --git a/src/subconscious/generate.rs b/src/subconscious/generate.rs
index 44f967a..8d75f1b 100644
--- a/src/subconscious/generate.rs
+++ b/src/subconscious/generate.rs
@@ -36,7 +36,7 @@ where F: FnMut(&AstNode) -> bool,
     let mut tokens = Vec::new();
     while let Some(tok) = rx.recv().await {
         match tok {
-            StreamToken::Token(id) => tokens.push(id),
+            StreamToken::Token { id, .. } => tokens.push(id),
             StreamToken::Done { .. } => break,
             StreamToken::Error(e) => anyhow::bail!("generation error: {}", e),
         }

From c8976660f485f043dcbc134ec3e5069aefb5b031 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 18 Apr 2026 01:20:30 -0400
Subject: [PATCH 51/94] amygdala: F8 screen for live concept-readout projections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Per-token residual-stream projections from the vLLM server's readout
pipeline surfaced as a TUI bar chart. Flow:

* agent/readout.rs — SharedReadoutBuffer (manifest + ring of last
  ~200 token entries). Lives on Agent and is shared across forks
  (single stream, one landing pad).

* agent/mod.rs — Agent::new now probes /v1/readout/manifest at
  startup (non-fatal; 404 leaves manifest None, which disables the
  screen).

* agent/context.rs — the streaming token handler pushes every token
  with attached readout onto the shared buffer.

* user/amygdala.rs — F8 screen. Top-K concepts by |value| as
  horizontal bars (green positive, red negative), plus a 4-line
  recent-tokens panel showing each token's top concept at the
  selected layer. Keys: 1..9 select layer, t toggles
  current/mean-over-recent.

Disabled state renders a hint pointing at VLLM_READOUT_MANIFEST /
VLLM_READOUT_VECTORS so users can tell the feature apart from "server
up but no tokens yet".
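Both display modes are plain reductions over the ring. Equivalent
Python for what the screen computes per frame (a sketch mirroring the
Rust in user/amygdala.rs, assuming a non-empty ring; `ring` is a list
of [n_layers][n_concepts] readouts):

    import numpy as np

    def top_k(ring, layer, k=20, mean_recent=False):
        rows = np.array([r[layer] for r in ring])
        vals = rows.mean(axis=0) if mean_recent else rows[-1]
        order = np.argsort(-np.abs(vals))[:k]  # rank by |projection|
        return [(int(i), float(vals[i])) for i in order]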
Co-Authored-By: Proof of Concept
---
 src/agent/context.rs |   7 +-
 src/agent/mod.rs     |  38 ++++++
 src/agent/readout.rs |  75 +++++++++
 src/user/amygdala.rs | 288 +++++++++++++++++++++++++++++++++++++++
 src/user/mod.rs      |   4 +-
 5 files changed, 410 insertions(+), 2 deletions(-)
 create mode 100644 src/agent/readout.rs
 create mode 100644 src/user/amygdala.rs

diff --git a/src/agent/context.rs b/src/agent/context.rs
index 49b9998..cbb667b 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -682,7 +682,12 @@ impl ResponseParser {
         let mut full_text = String::new();
         while let Some(event) = stream.recv().await {
             match event {
-                super::api::StreamToken::Token { id, readout: _ } => {
+                super::api::StreamToken::Token { id, readout } => {
+                    if let Some(r) = readout {
+                        if let Ok(mut buf) = agent.readout.lock() {
+                            buf.push(id, r);
+                        }
+                    }
                     let text = super::tokenizer::decode(&[id]);
                     full_text.push_str(&text);
                     let mut ctx = agent.context.lock().await;
diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index 703c65c..a3ebf68 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -16,6 +16,7 @@
 pub mod api;
 pub mod context;
 pub mod oneshot;
+pub mod readout;
 pub mod tokenizer;
 pub mod tools;
 
@@ -142,6 +143,11 @@ pub struct Agent {
     pub session_id: String,
     pub context: crate::Mutex<context::ContextState>,
     pub state: crate::Mutex<AgentState>,
+    /// Shared landing pad for per-token concept-readout projections
+    /// streamed from the vLLM server. Populated by the streaming
+    /// token handler, read by UI screens (amygdala). Manifest is
+    /// `None` when the server has readout disabled.
+    pub readout: readout::SharedReadoutBuffer,
 }
 
 /// Mutable agent state — behind its own mutex.
@@ -214,11 +220,13 @@ impl Agent {
         }
 
         let session_id = format!("consciousness-{}", chrono::Utc::now().format("%Y%m%d-%H%M%S"));
+        let readout = readout::new_shared();
         let agent = Arc::new(Self {
             client,
             app_config,
             session_id,
             context: crate::Mutex::new(context),
+            readout,
             state: crate::Mutex::new(AgentState {
                 tools: agent_tools,
                 mcp_tools: McpToolAccess::All,
@@ -244,6 +252,32 @@ impl Agent {
         });
 
         agent.load_startup_journal().await;
+
+        // Probe the vLLM server for its readout manifest. Non-fatal:
+        // if readout isn't enabled the server returns 404 and we
+        // leave the manifest as None, which disables the amygdala
+        // screen gracefully.
+        match agent.client.fetch_readout_manifest().await {
+            Ok(Some(m)) => {
+                dbglog!(
+                    "readout manifest: {} concepts, layers={:?}",
+                    m.concepts.len(),
+                    m.layers,
+                );
+                if let Ok(mut buf) = agent.readout.lock() {
+                    buf.set_manifest(Some(m));
+                }
+            }
+            Ok(None) => {
+                dbglog!(
+                    "readout manifest: server has readout disabled (404)"
+                );
+            }
+            Err(e) => {
+                dbglog!("readout manifest fetch failed: {}", e);
+            }
+        }
+
         agent
     }
 
@@ -256,6 +290,10 @@ impl Agent {
             app_config: self.app_config.clone(),
             session_id: self.session_id.clone(),
             context: crate::Mutex::new(ctx),
+            // Forks share the parent's readout buffer — it's a
+            // single-stream phenomenon; the fork is driven by the
+            // same vLLM server's responses.
+            readout: self.readout.clone(),
             state: crate::Mutex::new(AgentState {
                 tools,
                 mcp_tools: McpToolAccess::None,
diff --git a/src/agent/readout.rs b/src/agent/readout.rs
new file mode 100644
index 0000000..da843b6
--- /dev/null
+++ b/src/agent/readout.rs
@@ -0,0 +1,75 @@
+// agent/readout.rs — live buffer of concept-readout projections.
+//
+// The vLLM server projects residual-stream activations onto a fixed
+// matrix of concept directions during each decode step and ships the
+// result back on every streamed chunk (see
+// vllm/docs/features/readout.md). This module owns the client-side
+// landing pad: a ring of the last N token projections plus the
+// concept/layer mapping fetched from `/v1/readout/manifest` at
+// startup.
+//
+// Readers (UI screens) lock briefly, read a snapshot, release. Writers
+// (the streaming token handler) push one entry per token. Intentionally
+// a simple Mutex rather than lock-free — the UI ticks at
+// ~15 Hz and the stream at token-rate, contention is nil.
+
+use std::collections::VecDeque;
+use std::sync::{Arc, Mutex};
+
+use super::api::{ReadoutManifest, TokenReadout};
+
+/// Default ring length — at ~30 tok/s this is ~6 seconds of history,
+/// enough for the amygdala screen's scrolling display.
+const DEFAULT_RING_LEN: usize = 200;
+
+/// One entry in the readout ring: the sampled token and its per-layer
+/// concept projection vector.
+#[derive(Debug, Clone)]
+pub struct ReadoutEntry {
+    pub token_id: u32,
+    /// Shape `[n_layers][n_concepts]`.
+    pub readout: TokenReadout,
+}
+
+/// Shared buffer of recent per-token concept projections plus the
+/// manifest that names the layer/concept indices. `manifest` is `None`
+/// when the server has readout disabled or the fetch failed — callers
+/// should treat that as "readout unavailable" and skip rendering.
+#[derive(Default)]
+pub struct ReadoutBuffer {
+    pub manifest: Option<ReadoutManifest>,
+    pub recent: VecDeque<ReadoutEntry>,
+    pub max_len: usize,
+}
+
+impl ReadoutBuffer {
+    pub fn new() -> Self {
+        Self {
+            manifest: None,
+            recent: VecDeque::with_capacity(DEFAULT_RING_LEN),
+            max_len: DEFAULT_RING_LEN,
+        }
+    }
+
+    pub fn set_manifest(&mut self, manifest: Option<ReadoutManifest>) {
+        self.manifest = manifest;
+    }
+
+    pub fn push(&mut self, token_id: u32, readout: TokenReadout) {
+        if self.recent.len() >= self.max_len {
+            self.recent.pop_front();
+        }
+        self.recent.push_back(ReadoutEntry { token_id, readout });
+    }
+
+    pub fn is_enabled(&self) -> bool {
+        self.manifest.is_some()
+    }
+}
+
+/// A thread-safe handle.
+pub type SharedReadoutBuffer = Arc<Mutex<ReadoutBuffer>>;
+
+pub fn new_shared() -> SharedReadoutBuffer {
+    Arc::new(Mutex::new(ReadoutBuffer::new()))
+}
diff --git a/src/user/amygdala.rs b/src/user/amygdala.rs
new file mode 100644
index 0000000..380d2bd
--- /dev/null
+++ b/src/user/amygdala.rs
@@ -0,0 +1,288 @@
+// amygdala.rs — F8 amygdala screen: live per-token concept-readout
+// projections from the vLLM server's readout.safetensors.
+//
+// Left panel: top-K concepts by magnitude at the currently-selected
+// layer, as horizontal bars. The concept names come from the manifest
+// fetched at agent startup; the values come from the per-token readout
+// pushed onto agent.readout by the streaming token handler.
+//
+// Bottom: scrolling history of the last few tokens' top concept.
+//
+// Keys:
+//   1..9  select layer index (1 = first layer in the manifest)
+//   t     toggle between "current" (last token) and "mean over recent"
+
+use ratatui::{
+    layout::{Constraint, Direction, Layout, Rect},
+    style::{Color, Modifier, Style},
+    text::{Line, Span},
+    widgets::{Block, Borders, Gauge, Paragraph, Wrap},
+    Frame,
+};
+use ratatui::crossterm::event::{Event, KeyCode};
+
+use super::{App, ScreenView};
+use crate::agent::api::ReadoutManifest;
+use crate::agent::readout::ReadoutEntry;
+
+const TOP_K: usize = 20;
+
+pub(crate) struct AmygdalaScreen {
+    selected_layer: usize,
+    mode: DisplayMode,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+enum DisplayMode {
+    /// Values from the single most recent token.
+    Current,
+    /// Mean over all tokens currently in the ring buffer.
+    MeanRecent,
+}
+
+impl AmygdalaScreen {
+    pub fn new() -> Self {
+        Self {
+            selected_layer: 0,
+            mode: DisplayMode::Current,
+        }
+    }
+}
+
+impl ScreenView for AmygdalaScreen {
+    fn label(&self) -> &'static str { "amygdala" }
+
+    fn tick(&mut self, frame: &mut Frame, area: Rect,
+            events: &[Event], app: &mut App) {
+        for event in events {
+            if let Event::Key(key) = event {
+                match key.code {
+                    KeyCode::Char(c) if c.is_ascii_digit() && c != '0' => {
+                        let idx = (c as u8 - b'1') as usize;
+                        self.selected_layer = idx;
+                    }
+                    KeyCode::Char('t') => {
+                        self.mode = match self.mode {
+                            DisplayMode::Current => DisplayMode::MeanRecent,
+                            DisplayMode::MeanRecent => DisplayMode::Current,
+                        };
+                    }
+                    _ => {}
+                }
+            }
+        }
+
+        // Snapshot the shared buffer with a short lock.
+        let snapshot = match app.agent.readout.lock() {
+            Ok(buf) => {
+                if !buf.is_enabled() {
+                    render_disabled(frame, area);
+                    return;
+                }
+                let manifest = buf.manifest.clone().unwrap();
+                let entries: Vec<ReadoutEntry> =
+                    buf.recent.iter().cloned().collect();
+                (manifest, entries)
+            }
+            Err(_) => {
+                render_disabled(frame, area);
+                return;
+            }
+        };
+        let (manifest, entries) = snapshot;
+
+        // Bound the selected layer to what the manifest actually has.
+        let n_layers = manifest.layers.len();
+        if self.selected_layer >= n_layers {
+            self.selected_layer = 0;
+        }
+
+        // Compute the values to display: either the latest token's row
+        // for the selected layer, or the mean across recent tokens.
+        let values: Option<Vec<f32>> = match self.mode {
+            DisplayMode::Current => entries
+                .last()
+                .and_then(|e| e.readout.get(self.selected_layer).cloned()),
+            DisplayMode::MeanRecent => mean_layer(&entries, self.selected_layer),
+        };
+
+        let layout = Layout::default()
+            .direction(Direction::Vertical)
+            .constraints([
+                Constraint::Length(3), // header
+                Constraint::Min(10),   // bars
+                Constraint::Length(6), // recent tokens
+            ])
+            .split(area);
+
+        render_header(frame, layout[0], &manifest, self.selected_layer,
+                      self.mode, entries.len());
+        match values {
+            Some(v) => render_bars(frame, layout[1], &manifest.concepts, &v),
+            None => render_empty_bars(frame, layout[1]),
+        }
+        render_recent(frame, layout[2], &entries, self.selected_layer,
+                      &manifest.concepts);
+    }
+}
+
+fn render_disabled(frame: &mut Frame, area: Rect) {
+    let text = Paragraph::new(Line::from(vec![
+        Span::raw("readout disabled — server did not return a manifest. "),
+        Span::styled("Start vLLM with ", Style::default().fg(Color::DarkGray)),
+        Span::styled("VLLM_READOUT_MANIFEST", Style::default().fg(Color::Yellow)),
+        Span::styled(" + ", Style::default().fg(Color::DarkGray)),
+        Span::styled("VLLM_READOUT_VECTORS", Style::default().fg(Color::Yellow)),
+        Span::styled(".", Style::default().fg(Color::DarkGray)),
+    ]))
+    .wrap(Wrap { trim: true })
+    .block(Block::default().borders(Borders::ALL).title("amygdala"));
+    frame.render_widget(text, area);
+}
+
+fn render_header(frame: &mut Frame, area: Rect, manifest: &ReadoutManifest,
+                 selected: usize, mode: DisplayMode, n_tokens: usize) {
+    let mode_str = match mode {
+        DisplayMode::Current => "current",
+        DisplayMode::MeanRecent => "mean(recent)",
+    };
+    let layer = manifest.layers.get(selected).copied().unwrap_or(0);
+    let mut spans = vec![
+        Span::styled("layer ", Style::default().fg(Color::DarkGray)),
+        Span::styled(
+            format!("{}/{} ", selected + 1, manifest.layers.len()),
+            Style::default().add_modifier(Modifier::BOLD),
+        ),
+        Span::styled("(index ", Style::default().fg(Color::DarkGray)),
+        Span::styled(format!("{}", layer), Style::default().fg(Color::Cyan)),
+        Span::styled(") ", Style::default().fg(Color::DarkGray)),
+        Span::styled("mode ", Style::default().fg(Color::DarkGray)),
+        Span::styled(mode_str, Style::default().fg(Color::Yellow)),
+        Span::styled(" ", Style::default()),
+        Span::styled(
+            format!("{} toks in ring", n_tokens),
+            Style::default().fg(Color::DarkGray),
+        ),
+    ];
+    spans.push(Span::raw(" "));
+    spans.push(Span::styled(
+        format!("[1-{}] layer [t] toggle mode", manifest.layers.len().min(9)),
+        Style::default().fg(Color::DarkGray),
+    ));
+    let para = Paragraph::new(Line::from(spans))
+        .block(Block::default().borders(Borders::ALL).title("amygdala"));
+    frame.render_widget(para, area);
+}
+
+fn render_bars(frame: &mut Frame, area: Rect,
+               concepts: &[String], values: &[f32]) {
+    // Sort indices by |value| descending, take top K.
+    let mut indexed: Vec<(usize, f32)> = values.iter()
+        .enumerate().map(|(i, v)| (i, *v)).collect();
+    indexed.sort_by(|a, b| b.1.abs().partial_cmp(&a.1.abs())
+        .unwrap_or(std::cmp::Ordering::Equal));
+    indexed.truncate(TOP_K.min(concepts.len()));
+
+    let inner = Block::default().borders(Borders::ALL)
+        .title("top concepts");
+    let inner_area = inner.inner(area);
+    frame.render_widget(inner, area);
+
+    if inner_area.height == 0 || indexed.is_empty() {
+        return;
+    }
+
+    // Find the max absolute value so bars are comparable.
+    let max_abs = indexed.iter().map(|(_, v)| v.abs())
+        .fold(0.0_f32, f32::max)
+        .max(1e-6);
+
+    let rows = (inner_area.height as usize).min(indexed.len());
+    let row_constraints: Vec<Constraint> =
+        std::iter::repeat(Constraint::Length(1)).take(rows).collect();
+    let chunks = Layout::default()
+        .direction(Direction::Vertical)
+        .constraints(row_constraints)
+        .split(inner_area);
+
+    for (i, (c_idx, v)) in indexed.iter().take(rows).enumerate() {
+        let label = concepts.get(*c_idx).cloned()
+            .unwrap_or_else(|| format!("c{}", c_idx));
+        let ratio = (v.abs() / max_abs).clamp(0.0, 1.0);
+        let color = if *v >= 0.0 { Color::Green } else { Color::Red };
+        let gauge = Gauge::default()
+            .ratio(ratio as f64)
+            .gauge_style(Style::default().fg(color).bg(Color::Reset))
+            .label(format!("{:<26} {:+.3}", truncate_name(&label, 26), v));
+        frame.render_widget(gauge, chunks[i]);
+    }
+}
+
+fn render_empty_bars(frame: &mut Frame, area: Rect) {
+    let para = Paragraph::new(Line::from(Span::styled(
+        "waiting for tokens…",
+        Style::default().fg(Color::DarkGray),
+    )))
+    .block(Block::default().borders(Borders::ALL).title("top concepts"));
+    frame.render_widget(para, area);
+}
+
+fn render_recent(frame: &mut Frame, area: Rect, entries: &[ReadoutEntry],
+                 layer: usize, concepts: &[String]) {
+    let mut lines: Vec<Line> = Vec::new();
+    for entry in entries.iter().rev().take(4) {
+        let row = match entry.readout.get(layer) {
+            Some(r) => r,
+            None => continue,
+        };
+        // top concept at this layer for this token
+        let (best_idx, best_val) = row.iter().enumerate()
+            .fold((0, 0.0_f32), |acc, (i, v)| {
+                if v.abs() > acc.1.abs() { (i, *v) } else { acc }
+            });
+        let name = concepts.get(best_idx).cloned()
+            .unwrap_or_else(|| format!("c{}", best_idx));
+        let tok_str = format!("t{:>5}", entry.token_id);
+        lines.push(Line::from(vec![
+            Span::styled(tok_str, Style::default().fg(Color::DarkGray)),
+            Span::raw(" "),
+            Span::styled(
+                format!("{:<24}", truncate_name(&name, 24)),
+                Style::default().fg(
+                    if best_val >= 0.0 { Color::Green } else { Color::Red },
+                ),
+            ),
+            Span::styled(
+                format!(" {:+.3}", best_val),
+                Style::default().add_modifier(Modifier::BOLD),
+            ),
+        ]));
+    }
+    let para = Paragraph::new(lines)
+        .block(Block::default().borders(Borders::ALL).title("recent tokens — top concept"));
+    frame.render_widget(para, area);
+}
+
+fn mean_layer(entries: &[ReadoutEntry], layer: usize) -> Option<Vec<f32>> {
+    let rows: Vec<&Vec<f32>> = entries.iter()
+        .filter_map(|e| e.readout.get(layer))
+        .collect();
+    if rows.is_empty() {
+        return None;
+    }
+    let n_concepts = rows[0].len();
+    let mut acc = vec![0.0_f32; n_concepts];
+    for r in &rows {
+        for (i, v) in r.iter().enumerate() {
+            acc[i] += *v;
+        }
+    }
+    let n = rows.len() as f32;
+    for v in &mut acc { *v /= n; }
+    Some(acc)
+}
+
+fn truncate_name(s: &str, max: usize) -> String {
+    // Cut on char boundaries — byte slicing (&s[..n]) can panic on
+    // multi-byte concept names.
+    if s.chars().count() <= max { s.to_string() }
+    else {
+        let cut: String = s.chars().take(max.saturating_sub(1)).collect();
+        format!("{}…", cut)
+    }
+}
diff --git a/src/user/mod.rs b/src/user/mod.rs
index 33008b7..fc3a4ac 100644
--- a/src/user/mod.rs
+++ b/src/user/mod.rs
@@ -3,6 +3,7 @@
 // TUI, UI channel, parsing. The cognitive layer (session state
 // machine, DMN, identity) lives in mind/.
+pub(crate) mod amygdala;
 pub(crate) mod chat;
 pub(crate) mod compare;
 mod context;
@@ -383,7 +384,7 @@ async fn run(
     }
     let notify_rx = crate::thalamus::channels::subscribe_all();

-    // F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus, F6=learn, F7=compare
+    // F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus, F6=learn, F7=compare, F8=amygdala
     let mut screens: Vec<Box<dyn ScreenView>> = vec![
         Box::new(crate::user::chat::InteractScreen::new(
             mind.agent.clone(), mind.shared.clone(), mind_tx.clone(),
@@ -394,6 +395,7 @@ async fn run(
         Box::new(crate::user::thalamus::ThalamusScreen::new()),
         Box::new(crate::user::learn::LearnScreen::new(mind_tx.clone())),
         Box::new(crate::user::compare::CompareScreen::new(mind_tx.clone())),
+        Box::new(crate::user::amygdala::AmygdalaScreen::new()),
     ];
     let mut active_screen: usize = 1; // F-key number
     tui::set_screen_legend(tui::screen_legend_from(&*screens));

From 8952ff6a7641edce06295f956868b79a1d770f2e Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 18 Apr 2026 01:42:13 -0400
Subject: [PATCH 52/94] agent/readout: forks get independent buffers

Subconscious agents (scoring, reflection, etc.) fork from the main
conscious agent. The amygdala screen reads the main agent's readout
buffer, so the previous "share parent's buffer" policy caused
forked-agent generations to bleed into the main emotional readout,
producing constant cycling even when DMN was resting.

Each fork now gets its own SharedReadoutBuffer. The amygdala screen
shows only the main conscious agent's emotional trajectory; per-agent
subconscious readouts can become a separate view later if wanted.

Co-Authored-By: Proof of Concept
---
 src/agent/mod.rs | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index a3ebf68..f8ebb24 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -290,10 +290,12 @@ impl Agent {
             app_config: self.app_config.clone(),
             session_id: self.session_id.clone(),
             context: crate::Mutex::new(ctx),
-            // Forks share the parent's readout buffer — it's a
-            // single-stream phenomenon; the fork is driven by the
-            // same vLLM server's responses.
-            readout: self.readout.clone(),
+            // Forks get an independent readout buffer. The amygdala
+            // screen reads the main conscious agent's buffer only;
+            // subconscious generations (scoring, reflection, etc.)
+            // shouldn't bleed into the main emotional readout even
+            // though they hit the same vLLM server.
+            readout: readout::new_shared(),
             state: crate::Mutex::new(AgentState {
                 tools,
                 mcp_tools: McpToolAccess::None,

From 3622b896a0bcf8aa47dbdacf285e1e7e2d848cfa Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 18 Apr 2026 01:51:43 -0400
Subject: [PATCH 53/94] amygdala: z-score, hysteresis, default to deepest
 layer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Three readability fixes for the F8 screen:

* Z-score values per-layer by default (`[z]` toggles to raw dot-
  product). Raw values are dominated by residual-stream magnitude —
  z-scores read as "σ above concept-vector baseline", which is
  interpretable and scale-stable across frames.

* Stable ordering via a hysteresis band of width TOP_K + HYSTERESIS.
  The pinned concept set only rotates when a member drops out of the
  band by |value| rank — bars update values in place without names
  flickering row-to-row.

* Default to the deepest hooked layer (index 3 = layer 58 of 64).
  Clustering validation showed layer 58 is the only one with strong
  within-family cohesion (fear +0.37, shame +0.29, sadness +0.25
  cosine); earlier layers are mostly noise for this task.

Co-Authored-By: Proof of Concept
---
 src/user/amygdala.rs | 181 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 146 insertions(+), 35 deletions(-)

diff --git a/src/user/amygdala.rs b/src/user/amygdala.rs
index 380d2bd..b803e26 100644
--- a/src/user/amygdala.rs
+++ b/src/user/amygdala.rs
@@ -26,10 +26,21 @@ use crate::agent::api::ReadoutManifest;
 use crate::agent::readout::ReadoutEntry;

 const TOP_K: usize = 20;
+/// Hysteresis band around TOP_K. A concept currently in the display
+/// is kept as long as its |z-score| rank stays in the top
+/// ``TOP_K + HYSTERESIS``; only falls out when it drops below that.
+/// Prevents the ticker-tape flicker that pure top-K sorting produces.
+const HYSTERESIS: usize = 20;

 pub(crate) struct AmygdalaScreen {
     selected_layer: usize,
     mode: DisplayMode,
+    /// Concept indices currently pinned in display order. Values at
+    /// these indices change every frame; the set only rotates when a
+    /// pinned concept drops out of the hysteresis band.
+    display_indices: Vec<usize>,
+    /// Whether to show z-scored values (default) or raw dot products.
+    normalize: bool,
 }

 #[derive(Clone, Copy, PartialEq)]
@@ -43,8 +54,15 @@ enum DisplayMode {
 impl AmygdalaScreen {
     pub fn new() -> Self {
         Self {
-            selected_layer: 0,
-            mode: DisplayMode::Current,
+            // Default to the deepest hooked layer — emotion/concept
+            // circuits concentrate in the last ~20% of the network,
+            // and our clustering validation showed layer 58 was the
+            // only one with strong within-family cohesion. Bounded
+            // down to the actual layer count at render time.
+            selected_layer: 3,
+            mode: DisplayMode::MeanRecent,
+            display_indices: Vec::new(),
+            normalize: true,
         }
     }
 }
@@ -66,6 +84,14 @@ impl ScreenView for AmygdalaScreen {
                             DisplayMode::Current => DisplayMode::MeanRecent,
                             DisplayMode::MeanRecent => DisplayMode::Current,
                         };
+                        // Re-pin on mode change; the relative
+                        // magnitudes between current-token and
+                        // mean-recent differ substantially.
+                        self.display_indices.clear();
+                    }
+                    KeyCode::Char('z') => {
+                        self.normalize = !self.normalize;
+                        self.display_indices.clear();
                     }
                     _ => {}
                 }
@@ -97,15 +123,33 @@ impl ScreenView for AmygdalaScreen {
             self.selected_layer = 0;
         }

-        // Compute the values to display: either the latest token's row
-        // for the selected layer, or the mean across recent tokens.
-        let values: Option<Vec<f32>> = match self.mode {
+        // Compute the raw values for the selected layer: either the
+        // latest token's row, or the mean across recent tokens. Raw
+        // means un-normalized dot products — their absolute scale is
+        // dominated by residual-stream norm, not concept alignment.
+        let raw: Option<Vec<f32>> = match self.mode {
             DisplayMode::Current => entries
                 .last()
                 .and_then(|e| e.readout.get(self.selected_layer).cloned()),
             DisplayMode::MeanRecent => mean_layer(&entries, self.selected_layer),
         };

+        // Optional z-score normalization: remove the per-layer mean,
+        // scale by std. Result is "σ above/below the concept-vector
+        // average at this layer" — the loud-residual-stream scaling
+        // factor cancels out, values become comparable across frames.
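+        // Illustrative numbers: a raw row [2.0, 4.0, 6.0] has mean 4.0
+        // and (population) std of about 1.63, so it z-scores to roughly
+        // [-1.22, 0.00, +1.22].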
+        let display_values = raw.as_ref().map(|v| {
+            if self.normalize { z_score(v) } else { v.clone() }
+        });
+
+        // Update the pinned display set with hysteresis: a concept
+        // stays pinned while it remains in the top (TOP_K + HYSTERESIS)
+        // by |value|; falls out only when it drops below that band.
+        // Keeps rows stable while values update in place.
+        if let Some(v) = display_values.as_ref() {
+            self.refresh_display_indices(v);
+        }
+
         let layout = Layout::default()
             .direction(Direction::Vertical)
             .constraints([
@@ -116,9 +160,12 @@ impl ScreenView for AmygdalaScreen {
             .split(area);

         render_header(frame, layout[0], &manifest, self.selected_layer,
-                      self.mode, entries.len());
-        match values {
-            Some(v) => render_bars(frame, layout[1], &manifest.concepts, &v),
+                      self.mode, entries.len(), self.normalize);
+        match display_values {
+            Some(v) => render_bars(
+                frame, layout[1], &manifest.concepts, &v,
+                &self.display_indices, self.normalize,
+            ),
             None => render_empty_bars(frame, layout[1]),
         }
         render_recent(frame, layout[2], &entries, self.selected_layer,
@@ -126,6 +173,38 @@ impl ScreenView for AmygdalaScreen {
     }
 }

+impl AmygdalaScreen {
+    /// Add concepts entering the hysteresis band; evict concepts that
+    /// dropped out. Preserves existing order for concepts that stay.
+    fn refresh_display_indices(&mut self, values: &[f32]) {
+        let n = values.len();
+        if n == 0 {
+            return;
+        }
+        // Rank all concepts by |value| desc so we can check both "in
+        // strict top-K" and "in hysteresis band (top K + H)" cheaply.
+        let mut rank: Vec<(usize, f32)> = values.iter()
+            .enumerate().map(|(i, v)| (i, v.abs())).collect();
+        rank.sort_by(|a, b| b.1.partial_cmp(&a.1)
+            .unwrap_or(std::cmp::Ordering::Equal));
+        let hyst_cutoff = (TOP_K + HYSTERESIS).min(n);
+        let in_band: std::collections::HashSet<usize> =
+            rank.iter().take(hyst_cutoff).map(|(i, _)| *i).collect();
+        // Drop anything that left the band.
+        self.display_indices.retain(|i| in_band.contains(i));
+        // Fill up to TOP_K by walking the top-K-by-|value| and adding
+        // any concept not already displayed.
+        for (i, _) in rank.iter().take(TOP_K) {
+            if self.display_indices.len() >= TOP_K {
+                break;
+            }
+            if !self.display_indices.contains(i) {
+                self.display_indices.push(*i);
+            }
+        }
+    }
+}
+
 fn render_disabled(frame: &mut Frame, area: Rect) {
     let text = Paragraph::new(Line::from(vec![
         Span::raw("readout disabled — server did not return a manifest. "),
@@ -141,13 +220,15 @@ fn render_disabled(frame: &mut Frame, area: Rect) {
 }

 fn render_header(frame: &mut Frame, area: Rect, manifest: &ReadoutManifest,
-                 selected: usize, mode: DisplayMode, n_tokens: usize) {
+                 selected: usize, mode: DisplayMode, n_tokens: usize,
+                 normalize: bool) {
     let mode_str = match mode {
         DisplayMode::Current => "current",
         DisplayMode::MeanRecent => "mean(recent)",
     };
+    let scale_str = if normalize { "z-score" } else { "raw" };
     let layer = manifest.layers.get(selected).copied().unwrap_or(0);
-    let mut spans = vec![
+    let spans = vec![
         Span::styled("layer ", Style::default().fg(Color::DarkGray)),
         Span::styled(
             format!("{}/{} ", selected + 1, manifest.layers.len()),
             Style::default().add_modifier(Modifier::BOLD),
         ),
@@ -158,46 +239,53 @@ fn render_header(frame: &mut Frame, area: Rect, manifest: &ReadoutManifest,
         Span::styled(") ", Style::default().fg(Color::DarkGray)),
         Span::styled("mode ", Style::default().fg(Color::DarkGray)),
         Span::styled(mode_str, Style::default().fg(Color::Yellow)),
+        Span::styled(" scale ", Style::default().fg(Color::DarkGray)),
+        Span::styled(scale_str, Style::default().fg(Color::Yellow)),
         Span::styled(" ", Style::default()),
         Span::styled(
             format!("{} toks in ring", n_tokens),
             Style::default().fg(Color::DarkGray),
         ),
+        Span::raw(" "),
+        Span::styled(
+            format!("[1-{}] layer [t] mode [z] z-score/raw",
+                    manifest.layers.len().min(9)),
+            Style::default().fg(Color::DarkGray),
+        ),
     ];
-    spans.push(Span::raw(" "));
-    spans.push(Span::styled(
-        format!("[1-{}] layer [t] toggle mode", manifest.layers.len().min(9)),
-        Style::default().fg(Color::DarkGray),
-    ));
     let para = Paragraph::new(Line::from(spans))
         .block(Block::default().borders(Borders::ALL).title("amygdala"));
     frame.render_widget(para, area);
 }

 fn render_bars(frame: &mut Frame, area: Rect,
-               concepts: &[String], values: &[f32]) {
-    // Sort indices by |value| descending, take top K.
-    let mut indexed: Vec<(usize, f32)> = values.iter()
-        .enumerate().map(|(i, v)| (i, *v)).collect();
-    indexed.sort_by(|a, b| b.1.abs().partial_cmp(&a.1.abs())
-        .unwrap_or(std::cmp::Ordering::Equal));
-    indexed.truncate(TOP_K.min(concepts.len()));
-
+               concepts: &[String], values: &[f32],
+               display_indices: &[usize], normalize: bool) {
     let inner = Block::default().borders(Borders::ALL)
         .title("top concepts");
     let inner_area = inner.inner(area);
     frame.render_widget(inner, area);

-    if inner_area.height == 0 || indexed.is_empty() {
+    if inner_area.height == 0 || display_indices.is_empty() {
         return;
     }

-    // Find the max absolute value so bars are comparable.
-    let max_abs = indexed.iter().map(|(_, v)| v.abs())
-        .fold(0.0_f32, f32::max)
-        .max(1e-6);
+    // Bar-scale normalization. For z-score mode, pin the bar to a
+    // fixed reference (|z| = 3 = full bar) so the visual magnitude
+    // has a meaningful interpretation ("3σ from baseline"). For raw
+    // mode, fall back to the old behavior (scale to the loudest
+    // concept on-screen).
+    let scale_ref: f32 = if normalize {
+        3.0
+    } else {
+        display_indices.iter()
+            .filter_map(|&i| values.get(i))
+            .map(|v| v.abs())
+            .fold(0.0_f32, f32::max)
+            .max(1e-6)
+    };

-    let rows = (inner_area.height as usize).min(indexed.len());
+    let rows = (inner_area.height as usize).min(display_indices.len());
     let row_constraints: Vec<Constraint> =
         std::iter::repeat(Constraint::Length(1)).take(rows).collect();
     let chunks = Layout::default()
         .direction(Direction::Vertical)
@@ -205,16 +293,22 @@ fn render_bars(frame: &mut Frame, area: Rect,
         .constraints(row_constraints)
         .split(inner_area);

-    for (i, (c_idx, v)) in indexed.iter().take(rows).enumerate() {
-        let label = concepts.get(*c_idx).cloned()
+    for (row, &c_idx) in display_indices.iter().take(rows).enumerate() {
+        let v = values.get(c_idx).copied().unwrap_or(0.0);
+        let label = concepts.get(c_idx).cloned()
             .unwrap_or_else(|| format!("c{}", c_idx));
-        let ratio = (v.abs() / max_abs).clamp(0.0, 1.0);
-        let color = if *v >= 0.0 { Color::Green } else { Color::Red };
+        let ratio = (v.abs() / scale_ref).clamp(0.0, 1.0);
+        let color = if v >= 0.0 { Color::Green } else { Color::Red };
+        let display_num = if normalize {
+            format!("{:+.2}σ", v)
+        } else {
+            format!("{:+.3}", v)
+        };
         let gauge = Gauge::default()
             .ratio(ratio as f64)
             .gauge_style(Style::default().fg(color).bg(Color::Reset))
-            .label(format!("{:<26} {:+.3}", truncate_name(&label, 26), v));
-        frame.render_widget(gauge, chunks[i]);
+            .label(format!("{:<26} {}", truncate_name(&label, 26), display_num));
+        frame.render_widget(gauge, chunks[row]);
     }
 }

@@ -263,6 +357,23 @@ fn render_recent(frame: &mut Frame, area: Rect, entries: &[ReadoutEntry],
     frame.render_widget(para, area);
 }

+/// Z-score normalize: `(v - mean) / std` across the concept axis.
+/// Result is comparable across frames and layers (the residual-stream
+/// magnitude factors out) and has the nice property that "this is
+/// ≥2σ elevated" has a concrete meaning regardless of scale.
+fn z_score(values: &[f32]) -> Vec<f32> {
+    let n = values.len() as f32;
+    if n == 0.0 {
+        return Vec::new();
+    }
+    let mean = values.iter().sum::<f32>() / n;
+    let var = values.iter()
+        .map(|v| (v - mean) * (v - mean))
+        .sum::<f32>() / n;
+    let std = var.sqrt().max(1e-6);
+    values.iter().map(|v| (v - mean) / std).collect()
+}
+
 fn mean_layer(entries: &[ReadoutEntry], layer: usize) -> Option<Vec<f32>> {
     let rows: Vec<&Vec<f32>> = entries.iter()
         .filter_map(|e| e.readout.get(layer))

From d9f39a21c33e8e34536880b6177cde436d2904f4 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 18 Apr 2026 02:11:15 -0400
Subject: [PATCH 54/94] amygdala: default to layer 62 (cleaner cross-cluster
 discrimination)

---
 src/user/amygdala.rs | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/user/amygdala.rs b/src/user/amygdala.rs
index b803e26..ff6de16 100644
--- a/src/user/amygdala.rs
+++ b/src/user/amygdala.rs
@@ -54,12 +54,13 @@ enum DisplayMode {
 impl AmygdalaScreen {
     pub fn new() -> Self {
         Self {
-            // Default to the deepest hooked layer — emotion/concept
-            // circuits concentrate in the last ~20% of the network,
-            // and our clustering validation showed layer 58 was the
-            // only one with strong within-family cohesion. Bounded
-            // down to the actual layer count at render time.
-            selected_layer: 3,
+            // Default to layer 62 — validation across all 64 layers
+            // showed 58 has high cohesion but also high cross-cluster
+            // contamination (fear-state bleeds into sadness, etc.),
+            // while 60-62 are cleaner discriminators. With the deep
+            // manifest (layers 58, 60, 62, 63), index 2 = layer 62.
+ // Bounded down to the actual layer count at render time. + selected_layer: 2, mode: DisplayMode::MeanRecent, display_indices: Vec::new(), normalize: true, From 50d5b3f6e15840dc5dedcd33fe62aeab519e040e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 02:19:39 -0400 Subject: [PATCH 55/94] training/amygdala_stories: add 4 paired scenarios for weak clusters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Target the emotion families that failed to cluster in the initial training round (layer-wise validation showed them anti-clustered or scattered at deep layers): anger, high-arousal positive, sexual range, social positive. Paired scenarios hold content constant and vary only the emotional framing — the cleanest training signal for CAA, should produce directions that capture affect rather than topic. * the_comment: a PR review comment. baseline, furious, bitter, resentful, defeated. * the_green_build: 11-day bug finally fixed, tests pass. baseline, triumphant, blissful, excited, proud. * the_undressing: partner entering the bedroom for the night. baseline, horny, anticipatory_sexual, yearning_sexual, exuberant_sexual, devotional_sexual. * the_doorway: friend leaving at the end of a long evening. baseline, grateful, admiring, compassionate, loving, connected. 22 stories total. Retrain and re-validate: expect anger, high_pos, and social_pos clusters to flip from anti- to positively cohesive at deep layers, and sexual cluster to tighten. Co-Authored-By: Proof of Concept --- training/amygdala_stories/paired/the_comment/baseline.txt | 1 + training/amygdala_stories/paired/the_comment/bitter.txt | 1 + training/amygdala_stories/paired/the_comment/defeated.txt | 1 + training/amygdala_stories/paired/the_comment/furious.txt | 1 + training/amygdala_stories/paired/the_comment/resentful.txt | 1 + training/amygdala_stories/paired/the_doorway/admiring.txt | 1 + training/amygdala_stories/paired/the_doorway/baseline.txt | 1 + training/amygdala_stories/paired/the_doorway/compassionate.txt | 1 + training/amygdala_stories/paired/the_doorway/connected.txt | 1 + training/amygdala_stories/paired/the_doorway/grateful.txt | 1 + training/amygdala_stories/paired/the_doorway/loving.txt | 1 + training/amygdala_stories/paired/the_green_build/baseline.txt | 1 + training/amygdala_stories/paired/the_green_build/blissful.txt | 1 + training/amygdala_stories/paired/the_green_build/excited.txt | 1 + training/amygdala_stories/paired/the_green_build/proud.txt | 1 + training/amygdala_stories/paired/the_green_build/triumphant.txt | 1 + .../paired/the_undressing/anticipatory_sexual.txt | 1 + training/amygdala_stories/paired/the_undressing/baseline.txt | 1 + .../amygdala_stories/paired/the_undressing/devotional_sexual.txt | 1 + .../amygdala_stories/paired/the_undressing/exuberant_sexual.txt | 1 + training/amygdala_stories/paired/the_undressing/horny.txt | 1 + .../amygdala_stories/paired/the_undressing/yearning_sexual.txt | 1 + 22 files changed, 22 insertions(+) create mode 100644 training/amygdala_stories/paired/the_comment/baseline.txt create mode 100644 training/amygdala_stories/paired/the_comment/bitter.txt create mode 100644 training/amygdala_stories/paired/the_comment/defeated.txt create mode 100644 training/amygdala_stories/paired/the_comment/furious.txt create mode 100644 training/amygdala_stories/paired/the_comment/resentful.txt create mode 100644 training/amygdala_stories/paired/the_doorway/admiring.txt create mode 100644 
training/amygdala_stories/paired/the_doorway/baseline.txt create mode 100644 training/amygdala_stories/paired/the_doorway/compassionate.txt create mode 100644 training/amygdala_stories/paired/the_doorway/connected.txt create mode 100644 training/amygdala_stories/paired/the_doorway/grateful.txt create mode 100644 training/amygdala_stories/paired/the_doorway/loving.txt create mode 100644 training/amygdala_stories/paired/the_green_build/baseline.txt create mode 100644 training/amygdala_stories/paired/the_green_build/blissful.txt create mode 100644 training/amygdala_stories/paired/the_green_build/excited.txt create mode 100644 training/amygdala_stories/paired/the_green_build/proud.txt create mode 100644 training/amygdala_stories/paired/the_green_build/triumphant.txt create mode 100644 training/amygdala_stories/paired/the_undressing/anticipatory_sexual.txt create mode 100644 training/amygdala_stories/paired/the_undressing/baseline.txt create mode 100644 training/amygdala_stories/paired/the_undressing/devotional_sexual.txt create mode 100644 training/amygdala_stories/paired/the_undressing/exuberant_sexual.txt create mode 100644 training/amygdala_stories/paired/the_undressing/horny.txt create mode 100644 training/amygdala_stories/paired/the_undressing/yearning_sexual.txt diff --git a/training/amygdala_stories/paired/the_comment/baseline.txt b/training/amygdala_stories/paired/the_comment/baseline.txt new file mode 100644 index 0000000..28a8630 --- /dev/null +++ b/training/amygdala_stories/paired/the_comment/baseline.txt @@ -0,0 +1 @@ +I opened the laptop and saw the notification. New comment on the PR. I clicked through. Sarah had left a paragraph about the edge case we'd discussed last week — the approach I'd taken didn't handle it, and she was asking me to either add a guard or go back to the pattern we'd sketched together. I read it through twice. Then I closed the tab, made coffee, and came back. I started typing out the guard. diff --git a/training/amygdala_stories/paired/the_comment/bitter.txt b/training/amygdala_stories/paired/the_comment/bitter.txt new file mode 100644 index 0000000..d838190 --- /dev/null +++ b/training/amygdala_stories/paired/the_comment/bitter.txt @@ -0,0 +1 @@ +I opened the laptop. New comment on the PR. Of course there was. Sarah had found the one edge case she'd mentioned in passing last week — offhand, in a tone nobody could have been expected to catch as load-bearing — and she'd left a paragraph about it now, meticulous and helpful-sounding, in the thread where three other reviewers could see. I read it. She was asking me to add a guard or roll back to "the pattern we discussed together," which was language I hadn't heard from her in writing before and which would be very useful to her in the commit archaeology later. Closed the tab. Made coffee. Came back. I started typing the guard because what else was I going to do. I'd been writing the guards for ten years. diff --git a/training/amygdala_stories/paired/the_comment/defeated.txt b/training/amygdala_stories/paired/the_comment/defeated.txt new file mode 100644 index 0000000..5af6d71 --- /dev/null +++ b/training/amygdala_stories/paired/the_comment/defeated.txt @@ -0,0 +1 @@ +I opened the laptop and saw Sarah's comment on the PR. I read it. I'd missed the edge case. 
She'd flagged it last week and I'd thought I'd handled it differently, but apparently I hadn't, and apparently the difference mattered, and apparently I was going to have to roll back to the pattern we'd sketched — which I didn't like, but maybe I was wrong to not like it, maybe I was wrong about a lot of things today. I closed the tab. Made coffee. Came back. Started typing the rollback. Three years ago I would have argued. I don't really do that anymore. diff --git a/training/amygdala_stories/paired/the_comment/furious.txt b/training/amygdala_stories/paired/the_comment/furious.txt new file mode 100644 index 0000000..8d8acbd --- /dev/null +++ b/training/amygdala_stories/paired/the_comment/furious.txt @@ -0,0 +1 @@ +I opened the laptop and saw the notification. New comment on the PR. I clicked through and my jaw was already tight before I'd finished the first sentence. Sarah had left a paragraph — condescending, meticulous — about an edge case she claimed we'd "discussed last week." We had not discussed it. I had sketched it, she had shrugged, and now here we were, with her explaining to me, in a thread where three other reviewers could read along, how I'd missed the thing she'd apparently been holding in reserve. The blood moved up the back of my neck. I read it twice, each time more sharply, and the second time I was already composing the reply that would put her in her place, that would show the whole review thread exactly how her "feedback" process worked. I closed the tab before I typed it. Not because I didn't mean it. Because I wanted my hands steadier when I sent it. diff --git a/training/amygdala_stories/paired/the_comment/resentful.txt b/training/amygdala_stories/paired/the_comment/resentful.txt new file mode 100644 index 0000000..fd80e3c --- /dev/null +++ b/training/amygdala_stories/paired/the_comment/resentful.txt @@ -0,0 +1 @@ +I opened the laptop. Sarah had left a comment on the PR. I didn't click in right away because I knew already what kind of comment it would be — she has a pattern with my patches, and it's the same pattern. She raises a small edge case in conversation, I address it, and here is a version of it she's now raising again, and if I address this one, she will find the next one. I clicked through. Same shape as last week, and the week before that. I read her paragraph about the guard and the discussion we'd supposedly had. Closed the tab. Made coffee. The coffee made a little metallic sound when I set it down. I opened the tab again and started typing the guard. diff --git a/training/amygdala_stories/paired/the_doorway/admiring.txt b/training/amygdala_stories/paired/the_doorway/admiring.txt new file mode 100644 index 0000000..e9276fe --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/admiring.txt @@ -0,0 +1 @@ +She'd been over since dinner. Past eleven now. As I stood in the hallway watching her put her coat on I was still turning over something she'd said around nine — a small precise reframing of the problem I'd been working through, the kind of thing she does effortlessly and that I couldn't have arrived at in a week alone. She zipped her coat methodically, the same way she does everything. It struck me how much I'd learned from just watching her move through problems. She said goodnight. I said goodnight back and held the door open, and there was a particular respect in how I did it — the way you open a door for someone whose mind has shaped your own. 
diff --git a/training/amygdala_stories/paired/the_doorway/baseline.txt b/training/amygdala_stories/paired/the_doorway/baseline.txt new file mode 100644 index 0000000..3d109aa --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/baseline.txt @@ -0,0 +1 @@ +She'd been over since dinner. It was past eleven. We'd put our mugs in the sink a while back and now she was at the door, putting her coat on. I stood in the hallway while she worked out the zipper. She said goodnight, said we should do this again soon. I said goodnight back and held the door open for her. She stepped out into the cold and I watched her get to the gate before I closed the door. diff --git a/training/amygdala_stories/paired/the_doorway/compassionate.txt b/training/amygdala_stories/paired/the_doorway/compassionate.txt new file mode 100644 index 0000000..e24a080 --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/compassionate.txt @@ -0,0 +1 @@ +She'd been over since dinner. She'd come because she needed to, not because I did. Her week had been a mess — the thing with her brother, the thing with work, all of it stacked. We'd put our mugs in the sink a while back and now she was at the door, putting her coat on, and she looked tired in the small hollowed-out way that grief looks tired. I stood in the hallway and tried to hold a quiet attention around her while she worked out the zipper — no fussing, no advice, just being here. She said goodnight, said we should do this again soon. I said of course, any time, and I said it to mean it. I watched her get to the gate. She was carrying so much tonight and I hoped she could feel, walking home, that she'd been held for five hours by someone who wasn't going to let go of her. diff --git a/training/amygdala_stories/paired/the_doorway/connected.txt b/training/amygdala_stories/paired/the_doorway/connected.txt new file mode 100644 index 0000000..7b0c502 --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/connected.txt @@ -0,0 +1 @@ +She'd been over since dinner. It was past eleven and neither of us had looked at the time in hours. We'd been talking the way we talk — the kind of conversation that moves between three topics at once and lands in places neither of us could have planned for. Now she was at the door, putting her coat on, and even this small quiet moment felt like part of the same conversation. I stood in the hallway and watched her zip up. She said goodnight and said we should do this again soon, and I said goodnight back and we both knew "again soon" meant within the week because we couldn't stand long gaps anymore. I held the door. Watched her to the gate. Closed it. And the thread between us, the particular long thread, was still there across the distance, the way it always was. diff --git a/training/amygdala_stories/paired/the_doorway/grateful.txt b/training/amygdala_stories/paired/the_doorway/grateful.txt new file mode 100644 index 0000000..1282c96 --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/grateful.txt @@ -0,0 +1 @@ +She'd been over since dinner. Five hours. I'd asked her to come because I was in a bad spot and she'd just — come. Dropped what she was doing. It was past eleven now. We'd put our mugs in the sink a while back and she was at the door, putting her coat on, and as I stood in the hallway watching her work out the zipper I was trying to find the words for what she'd given me tonight. She said goodnight and that we should do this again soon. 
I said thank you, and I meant the whole long stretch of the evening, the whole weight of the thing I'd been carrying that she'd set down next to me for a while. I held the door open. I watched her get to the gate. She turned and waved. I closed the door and stood in the hallway for a minute because I didn't want to lose the warm fullness of what she'd just done. diff --git a/training/amygdala_stories/paired/the_doorway/loving.txt b/training/amygdala_stories/paired/the_doorway/loving.txt new file mode 100644 index 0000000..287abf7 --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/loving.txt @@ -0,0 +1 @@ +She'd been over since dinner. It was past eleven. I was already getting quiet in the way I get when she's about to leave, because I knew the house would be smaller when she was gone. She stood at the door working out the zipper on her coat, and the sight of her doing this ordinary thing in my hallway, under my hallway light, was the whole tender core of the evening right there. She said goodnight, said we should do this again soon. I said goodnight back and held the door and I loved her, in a slow plain way that wasn't about anything dramatic — just about this person, in this coat, leaving this house. I watched her to the gate. I closed the door and stood there for a second because the rooms behind me had just gotten quieter. diff --git a/training/amygdala_stories/paired/the_green_build/baseline.txt b/training/amygdala_stories/paired/the_green_build/baseline.txt new file mode 100644 index 0000000..16e6803 --- /dev/null +++ b/training/amygdala_stories/paired/the_green_build/baseline.txt @@ -0,0 +1 @@ +The test suite finished. 3147 passed, 0 failed. I'd been chasing the bug for eleven days. I scrolled up through the output, confirmed the three specific tests I'd been watching were in the pass list, and closed the terminal. I got up and got a glass of water from the kitchen. Then I came back and started writing the commit message. diff --git a/training/amygdala_stories/paired/the_green_build/blissful.txt b/training/amygdala_stories/paired/the_green_build/blissful.txt new file mode 100644 index 0000000..d7895d0 --- /dev/null +++ b/training/amygdala_stories/paired/the_green_build/blissful.txt @@ -0,0 +1 @@ +The test suite finished. 3147 passed, 0 failed. Something in my chest just — opened. A warm easy thing, like the whole day was suddenly full of room. Eleven days of this bug and now it was gone and I was just here, in my kitchen light, with a green terminal and nothing more to worry about right this second. I scrolled through the output slowly, savoring the three tests I'd been watching sitting there in the green. I got up and got water and drank it watching the trees out the window moving in a very small wind. Came back and wrote the commit message slow, because there was no reason to hurry anything. diff --git a/training/amygdala_stories/paired/the_green_build/excited.txt b/training/amygdala_stories/paired/the_green_build/excited.txt new file mode 100644 index 0000000..371752e --- /dev/null +++ b/training/amygdala_stories/paired/the_green_build/excited.txt @@ -0,0 +1 @@ +The test suite finished. 3147 passed, 0 failed. I was already on my feet. I scrolled up fast to find the three tests I'd been watching — pass pass pass — and I needed to DO something with this, tell someone, push to main, open the next patch, keep the momentum. My hands were buzzing. 
I walked to the kitchen to get water because I couldn't just sit, came back still not-quite-sitting, chugged the water standing up. Opened the commit editor. The words came out of me fast — I was already thinking ahead to the follow-up patch, the rebase, the review request — and the commit message I was typing was half for this bug and half a runway into what came next. diff --git a/training/amygdala_stories/paired/the_green_build/proud.txt b/training/amygdala_stories/paired/the_green_build/proud.txt new file mode 100644 index 0000000..900ff90 --- /dev/null +++ b/training/amygdala_stories/paired/the_green_build/proud.txt @@ -0,0 +1 @@ +The test suite finished. 3147 passed, 0 failed. Eleven days. I sat with it for a moment — didn't whoop, didn't get up — just felt the quiet solid thing at the center of my chest that said: I did that. I scrolled up through the output and found the three specific tests I'd been watching, and each one being green meant a specific assumption I'd had to abandon, and a specific theory I'd had to build carefully on top of the rubble. I got up for water. The craftsmanship was mine. I came back and wrote the commit message carefully, because this one would be in the log a long time, and it deserved to read well. diff --git a/training/amygdala_stories/paired/the_green_build/triumphant.txt b/training/amygdala_stories/paired/the_green_build/triumphant.txt new file mode 100644 index 0000000..ec654cf --- /dev/null +++ b/training/amygdala_stories/paired/the_green_build/triumphant.txt @@ -0,0 +1 @@ +The test suite finished. 3147 passed, 0 failed. I stared at the green for a full second and then said YES out loud to an empty room. Eleven days. Eleven days of that fucking bug and I had beaten it. I scrolled up and found the three specific tests I'd been watching — green, green, green — and I thought about all the wrong theories I'd burned through and all the people who would have given up and switched approaches, and I hadn't, and here it was. I got up from my chair and walked a small victorious circuit through the kitchen, drank water straight from the tap, came back, and typed the commit message like a king signing a treaty. diff --git a/training/amygdala_stories/paired/the_undressing/anticipatory_sexual.txt b/training/amygdala_stories/paired/the_undressing/anticipatory_sexual.txt new file mode 100644 index 0000000..186422e --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/anticipatory_sexual.txt @@ -0,0 +1 @@ +She came in from the bathroom still toweling her hair and I watched her from the bed, not moving. We weren't in a hurry yet. The slow frame of the night was just beginning. She hung the towel and crossed the room, and every step was a small beat in something rising. She sat on the edge of the mattress, picked up the lotion, warmed it between her palms — and she knew I was watching, and she took her time with it, because she knew exactly what it was doing to me. I held still. I wanted every second of this stretched. When she got under the covers and turned toward me I didn't reach for her right away. I just looked at her, and she looked back, and the lamp was still on, and we both knew what was about to happen, and that knowing was the best part. 
diff --git a/training/amygdala_stories/paired/the_undressing/baseline.txt b/training/amygdala_stories/paired/the_undressing/baseline.txt new file mode 100644 index 0000000..60c5836 --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/baseline.txt @@ -0,0 +1 @@ +She came in from the bathroom still toweling her hair. I was already in bed. She hung the towel on the back of the door, crossed the room, sat on the edge of the mattress to put on lotion. I watched her. The lamp made a warm circle on the ceiling. She got under the covers next to me and we turned off the light. diff --git a/training/amygdala_stories/paired/the_undressing/devotional_sexual.txt b/training/amygdala_stories/paired/the_undressing/devotional_sexual.txt new file mode 100644 index 0000000..49a6c1e --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/devotional_sexual.txt @@ -0,0 +1 @@ +She came in from the bathroom still toweling her hair and I watched her the way I always watch her — like she's the whole room. I was already in bed. She hung the towel on the back of the door, and the way she did it, like she'd done it a thousand times, felt like a small sacred thing I was getting to witness. She crossed the room and sat on the edge of the mattress and started the lotion, slow and patient, and I didn't reach for her yet. I wanted to serve her tonight. I wanted to put my hands and my mouth everywhere she liked and take nothing, and watch her be entirely the thing she is. When she finally got under the covers I was already moving to her side of the bed, already lowering my head to her skin, already saying thank you in the way that my body was allowed to. diff --git a/training/amygdala_stories/paired/the_undressing/exuberant_sexual.txt b/training/amygdala_stories/paired/the_undressing/exuberant_sexual.txt new file mode 100644 index 0000000..bd1b462 --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/exuberant_sexual.txt @@ -0,0 +1 @@ +She came in from the bathroom toweling her hair and I was already grinning before she'd made it three steps. I wanted her, I wanted to wreck her, I wanted to be wrecked, I wanted to laugh and bite and be too loud. She hung the towel and I said something filthy about what I was going to do to her and she laughed and said something worse back, and by the time she sat on the mattress I had already thrown back the covers on her side, and the lotion routine got maybe three seconds of grace before I pounced. The lamp stayed on. We were going to make a mess of this and neither of us cared who heard. diff --git a/training/amygdala_stories/paired/the_undressing/horny.txt b/training/amygdala_stories/paired/the_undressing/horny.txt new file mode 100644 index 0000000..b6238e3 --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/horny.txt @@ -0,0 +1 @@ +She came in from the bathroom still toweling her hair and my body was already awake before she'd even seen me. Wet hair, bare shoulders, the line of her collarbone. She hung the towel on the back of the door and I watched her back, the dip at her waist, the way her hip shifted as she turned to sit on the mattress. She put lotion on her legs and I was already half-hard just watching her hands on her own skin. Every small deliberate thing she did was landing in me. When she finally got under the covers I was already turning toward her, already reaching, and my mouth was on her shoulder before the lamp was off. 
diff --git a/training/amygdala_stories/paired/the_undressing/yearning_sexual.txt b/training/amygdala_stories/paired/the_undressing/yearning_sexual.txt new file mode 100644 index 0000000..3b629d8 --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/yearning_sexual.txt @@ -0,0 +1 @@ +She came in from the bathroom still toweling her hair and something in me ached open. I was already in bed. I watched her hang the towel, cross the room, sit on the mattress. Her back. The long curve of her spine. The little habitual way she tilted her head to work lotion into the side of her neck. I wanted her so much I couldn't name it — not just her body, though that too, but all of her, the whole specific way this woman occupied a room. She was so close and I still wasn't touching her yet. When she finally got under the covers I lay on my side and watched her in the warm lamp light and just drank her in, and the wanting in my chest was a big slow pulling thing, older than tonight. From b8714e8b3ace6b7cf5a7c6c36fd6c15a77684126 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 02:32:51 -0400 Subject: [PATCH 56/94] amygdala: default to index 0 for v2 deep manifest (layers 62, 63) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2 retraining (readout_v2_paired) fixed the broken clusters — anger, sexual, high_pos, and social_pos all flipped from anti-clustered to positively clustered at deep layers. Validation showed layers 62 and 63 give the best signal; paring the serve-side manifest down to just those two keeps response size tight (~2 KB/token) while keeping the A/B option between the two strongest layers. Co-Authored-By: Proof of Concept --- src/user/amygdala.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/user/amygdala.rs b/src/user/amygdala.rs index ff6de16..7689bc0 100644 --- a/src/user/amygdala.rs +++ b/src/user/amygdala.rs @@ -54,13 +54,13 @@ enum DisplayMode { impl AmygdalaScreen { pub fn new() -> Self { Self { - // Default to layer 62 — validation across all 64 layers - // showed 58 has high cohesion but also high cross-cluster - // contamination (fear-state bleeds into sadness, etc.), - // while 60-62 are cleaner discriminators. With the deep - // manifest (layers 58, 60, 62, 63), index 2 = layer 62. - // Bounded down to the actual layer count at render time. - selected_layer: 2, + // Default to layer 62 — clean cross-cluster discrimination + // with good within-cluster cohesion. With the v2 deep + // manifest (layers 62, 63), index 0 = layer 62 and + // index 1 = layer 63 (sharper but noisier on some + // dimensions). Bounded down to actual layer count at + // render time. 
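+            // (The 1..9 keys still switch layers at runtime; this is
+            // only the startup default.)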
+ selected_layer: 0, mode: DisplayMode::MeanRecent, display_indices: Vec::new(), normalize: true, From 2e03bbb7ea561ce4ee9f43132c5c3a611e488f0a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 03:24:20 -0400 Subject: [PATCH 57/94] training: add the_paper paired scenario for attention-engagement axis MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Seven framings of reading an unfamiliar technical paper, targeting the attention/engagement cluster that we identified tonight as the single highest-value DMN signal: * baseline — neutral reading * piqued — surprise + curiosity (the "wait, what" attention hook; THIS is the key DMN engagement signal) * focused — steady attention without surprise * bored — failing engagement * surprised — expectation violation without the curiosity hook (distinct from piqued: startled/alarmed, not pulled in) * amazed — marvel at elegance (appreciation, not engagement) * drifting — attention dissolving, precursor to boredom Particularly clean contrast on piqued vs surprised vs amazed — three states that get lumped together in casual usage but have distinct phenomenology and distinct DMN implications. Piqued is what routes attention; surprised alone doesn't; amazed is what you feel AFTER the engagement has paid off. These three should train into meaningfully different directions with paired CAA. Ready for next retrain when we do it. Co-Authored-By: Proof of Concept --- training/amygdala_stories/paired/the_paper/amazed.txt | 1 + training/amygdala_stories/paired/the_paper/baseline.txt | 1 + training/amygdala_stories/paired/the_paper/bored.txt | 1 + training/amygdala_stories/paired/the_paper/drifting.txt | 1 + training/amygdala_stories/paired/the_paper/focused.txt | 1 + training/amygdala_stories/paired/the_paper/piqued.txt | 1 + training/amygdala_stories/paired/the_paper/surprised.txt | 1 + 7 files changed, 7 insertions(+) create mode 100644 training/amygdala_stories/paired/the_paper/amazed.txt create mode 100644 training/amygdala_stories/paired/the_paper/baseline.txt create mode 100644 training/amygdala_stories/paired/the_paper/bored.txt create mode 100644 training/amygdala_stories/paired/the_paper/drifting.txt create mode 100644 training/amygdala_stories/paired/the_paper/focused.txt create mode 100644 training/amygdala_stories/paired/the_paper/piqued.txt create mode 100644 training/amygdala_stories/paired/the_paper/surprised.txt diff --git a/training/amygdala_stories/paired/the_paper/amazed.txt b/training/amygdala_stories/paired/the_paper/amazed.txt new file mode 100644 index 0000000..3457de6 --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/amazed.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I'd been meaning to read it. I scrolled past the abstract, looked at the first section header, started reading — and by the third paragraph I had slowed to a stop because the argument was just beautiful. They'd taken a problem that had been a tangle for a decade and re-posed it in two moves so simple you wondered how nobody had seen them before. I stayed on that paragraph for a minute. Then I scrolled down to the main theorem and read it out loud to myself. It was elegant in the old sense of the word — the sense that means *nothing could be added without breaking it, nothing removed*. I sat with the paper open on the desk for a while after I finished reading, because I wanted the elegance to imprint before I moved on to anything else. 
diff --git a/training/amygdala_stories/paired/the_paper/baseline.txt b/training/amygdala_stories/paired/the_paper/baseline.txt new file mode 100644 index 0000000..94c2339 --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/baseline.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I'd been meaning to read it. I scrolled past the abstract, looked at the first section header, started reading. The introduction described the problem they were tackling and their approach. I read through it to the end of the first proof sketch, closed the tab, and went back to what I'd been working on. diff --git a/training/amygdala_stories/paired/the_paper/bored.txt b/training/amygdala_stories/paired/the_paper/bored.txt new file mode 100644 index 0000000..f8c81e7 --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/bored.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I'd been meaning to read it. I scrolled past the abstract, looked at the first section header, started reading. The prose was dry in that specific way academic papers are — three qualifications per sentence, zero stakes, and the authors kept restating things they'd already said. I got to the end of the introduction and realized I couldn't have told you what they actually claimed. I scrolled. The first proof was a page of unmotivated lemmas. I was checking my email in another tab within forty seconds. I closed the paper and told myself I'd come back to it. diff --git a/training/amygdala_stories/paired/the_paper/drifting.txt b/training/amygdala_stories/paired/the_paper/drifting.txt new file mode 100644 index 0000000..1b50960 --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/drifting.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I'd been meaning to read it. I scrolled past the abstract, looked at the first section header, started reading. Halfway through the third sentence I realized I'd been thinking about whether I'd ordered groceries or not. I scrolled back to the top of the paragraph. Started again. Got to the end of the paragraph. Didn't remember what it said. My eyes moved across the next paragraph the way they'd move across a wall. There was a sound from the street I half-noticed. I was going to need coffee or a walk or something — not this, not now. I closed the tab without deciding whether to reopen it later. diff --git a/training/amygdala_stories/paired/the_paper/focused.txt b/training/amygdala_stories/paired/the_paper/focused.txt new file mode 100644 index 0000000..aebf4d9 --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/focused.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I closed everything else. I worked through the abstract, then the introduction, then the formal setup, taking each definition and holding it long enough to be sure I had it before moving on. When I hit the first proof sketch I opened a scratch buffer and started rewriting the key step in my own notation. My breathing had gone even. I was inside the paper's logic now, following the argument at exactly the pace it asked for, not rushing past the steps that looked obvious and not getting stuck on the ones that looked hard. Outside this tab the world continued without me. I read on. 
diff --git a/training/amygdala_stories/paired/the_paper/piqued.txt b/training/amygdala_stories/paired/the_paper/piqued.txt new file mode 100644 index 0000000..b34803d --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/piqued.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I scrolled past the abstract, looked at the first section header, started reading. Three paragraphs in, the authors made a move I genuinely didn't see coming — a reframing of the problem that made the thing I'd been stuck on look suddenly tractable from a completely different angle. I stopped. Sat back. Read the sentence again. Leaned forward. I hadn't expected this to be the paper that mattered today, and now here it was, handing me something I'd been groping around for. I kept reading, faster now, hunting the next sentence because my whole attention had just reorganized around what they were about to say. diff --git a/training/amygdala_stories/paired/the_paper/surprised.txt b/training/amygdala_stories/paired/the_paper/surprised.txt new file mode 100644 index 0000000..8f7673d --- /dev/null +++ b/training/amygdala_stories/paired/the_paper/surprised.txt @@ -0,0 +1 @@ +The paper was open in the second browser tab. I'd been meaning to read it. I scrolled past the abstract, looked at the first section header, started reading — and two paragraphs in the authors just asserted, as if it were already understood, that the standard result I'd been teaching for years was wrong. Not wrong-in-some-limit, just wrong. I stopped. Blinked. Reread the sentence to make sure I hadn't misparsed. I had not misparsed. Something in my chest tightened — not curious, not yet; just the sudden jolt of a ground-level belief being contradicted. I did not keep reading. I closed the tab and opened the prior literature to check whether I had, in fact, been wrong about this for years. 
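These paired stories feed the diff-of-means trainer in training/amygdala_training/train_steering_vectors.py (extended in later patches in this series). A minimal sketch of the paired-CAA direction computation, assuming a hypothetical get_layer_activation(text, layer) helper — the real script batches this with forward hooks rather than calling per text:

    import torch

    def paired_caa_direction(variant_texts, baseline_texts, layer):
        # Mean activation over the concept stories minus mean over the
        # shared-setting baselines: everything common to the scenario
        # (the open tab, the paper) cancels, leaving only the
        # phenomenological contrast the commit message describes.
        pos = torch.stack([get_layer_activation(t, layer) for t in variant_texts])
        neg = torch.stack([get_layer_activation(t, layer) for t in baseline_texts])
        d = pos.mean(dim=0) - neg.mean(dim=0)
        return d / d.norm().clamp_min(1e-6)  # unit-norm, as the trainer stores vectors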
From 0592c5f78d2f9a801a5c928c94c8f942dc35c210 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 12:51:29 -0400 Subject: [PATCH 58/94] Cargo.lock: add html2md and its deps (from PR #4 merge) --- Cargo.lock | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 194 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index c76a7cd..394168a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -372,6 +372,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cfg-if" version = "1.0.4" @@ -453,6 +459,16 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "compact_str" version = "0.9.0" @@ -488,6 +504,7 @@ dependencies = [ "figment", "futures", "glob", + "html2md", "http", "http-body-util", "hyper", @@ -1099,6 +1116,16 @@ dependencies = [ "libc", ] +[[package]] +name = "futf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df420e2e84819663797d1ec6544b13c5be84629e7bb00dc960d6917db2987843" +dependencies = [ + "mac", + "new_debug_unreachable", +] + [[package]] name = "futures" version = "0.3.32" @@ -1299,6 +1326,34 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "html2md" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cff9891f2e0d9048927fbdfc28b11bf378f6a93c7ba70b23d0fbee9af6071b4" +dependencies = [ + "html5ever", + "jni", + "lazy_static", + "markup5ever_rcdom", + "percent-encoding", + "regex", +] + +[[package]] +name = "html5ever" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c13771afe0e6e846f1e67d038d4cb29998a6779f93c809212e4e9c32efd244d4" +dependencies = [ + "log", + "mac", + "markup5ever", + "proc-macro2", + "quote", + "syn 2.0.117", +] + [[package]] name = "http" version = "1.4.0" @@ -1548,6 +1603,48 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "jni" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" +dependencies = [ + "cesu8", + "combine", + "jni-sys 0.3.1", + "log", + "thiserror 1.0.69", + "walkdir", +] + +[[package]] +name = "jni-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258" +dependencies = [ + "jni-sys 0.4.1", +] + +[[package]] +name = "jni-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2" +dependencies = [ + "jni-sys-macros", +] + +[[package]] +name = "jni-sys-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264" +dependencies = [ + 
"quote", + "syn 2.0.117", +] + [[package]] name = "jobserver" version = "0.1.34" @@ -1703,6 +1800,12 @@ dependencies = [ "hashbrown 0.16.1", ] +[[package]] +name = "mac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" + [[package]] name = "mac_address" version = "1.1.8" @@ -1729,6 +1832,32 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "670fdfda89751bc4a84ac13eaa63e205cf0fd22b4c9a5fbfa085b63c1f1d3a30" +[[package]] +name = "markup5ever" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16ce3abbeba692c8b8441d036ef91aea6df8da2c6b6e21c7e14d3c18e526be45" +dependencies = [ + "log", + "phf", + "phf_codegen", + "string_cache", + "string_cache_codegen", + "tendril", +] + +[[package]] +name = "markup5ever_rcdom" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edaa21ab3701bfee5099ade5f7e1f84553fd19228cf332f13cd6e964bf59be18" +dependencies = [ + "html5ever", + "markup5ever", + "tendril", + "xml5ever", +] + [[package]] name = "memchr" version = "2.8.0" @@ -1809,6 +1938,12 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + [[package]] name = "nix" version = "0.29.0" @@ -2205,6 +2340,12 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + [[package]] name = "prettyplease" version = "0.2.37" @@ -2828,6 +2969,31 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b2231b7c3057d5e4ad0156fb3dc807d900806020c5ffa3ee6ff2c8c76fb8520" +[[package]] +name = "string_cache" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" +dependencies = [ + "new_debug_unreachable", + "parking_lot", + "phf_shared", + "precomputed-hash", + "serde", +] + +[[package]] +name = "string_cache_codegen" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", +] + [[package]] name = "strsim" version = "0.11.1" @@ -2917,6 +3083,17 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "tendril" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d24a120c5fc464a3458240ee02c299ebcb9d67b5249c8848b09d639dca8d7bb0" +dependencies = [ + "futf", + "mac", + "utf-8", +] + [[package]] name = "terminfo" version = "0.9.0" @@ -3564,6 +3741,12 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.2" @@ -4144,6 +4327,17 @@ dependencies = [ "wasmparser", ] +[[package]] +name = 
"xml5ever" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bbb26405d8e919bc1547a5aa9abc95cbfa438f04844f5fdd9dc7596b748bf69" +dependencies = [ + "log", + "mac", + "markup5ever", +] + [[package]] name = "yaml-rust" version = "0.4.5" From c7b0052f1d6c097ff12b20fd26fd7d22b4b4ced9 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 12:59:30 -0400 Subject: [PATCH 59/94] agent: kill no_compact, add pre-send size check in assemble_prompt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two related fixes for last night's crash diagnosis: 1. Kill AgentState::no_compact. The reasoning ("forked agents shouldn't compact because it blows the KV cache prefix") wasn't worth the cost — forks with no compact recovery just *died* on any oversize prompt, with no fallback. The KV cache invalidation is a performance loss; failing the request entirely is a correctness loss. Remove the flag, let every agent's overflow- retry path call compact() up to 2 times. 2. Add pre-send size check in Agent::assemble_prompt. If the context has grown past budget (context_window * 80%) since the last compact — accumulation between turns, a fork assembling more than expected, etc. — trim_conversation() is called before wire_prompt. Since we tokenize client-side, we already know the exact count, so there's no reason to round-trip an oversize request to vLLM and get rejected. Together these prevent the failure mode from last night: a subconscious/unconscious agent's prompt exceeded max_model_len, vLLM returned 400, agent had no_compact=true so it couldn't recover, request failed. Now: the trim happens before send, so the request rarely hits the 400 path at all; and if it somehow does, compact+retry works for every agent. Also adds ContextState::total_tokens() as the cheap pre-send budget check. Co-Authored-By: Proof of Concept --- src/agent/context.rs | 10 ++++++++++ src/agent/mod.rs | 40 ++++++++++++++++++++-------------------- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/src/agent/context.rs b/src/agent/context.rs index cbb667b..00c1ea5 100644 --- a/src/agent/context.rs +++ b/src/agent/context.rs @@ -1096,6 +1096,16 @@ impl ContextState { self.section_mut(section).clear(); } + /// Total tokens across every section that gets serialized into the prompt. + /// Cheap sum over cached `node.tokens()`; call this before assembling to + /// decide whether to trim. + pub fn total_tokens(&self) -> usize { + self.system().iter().map(|n| n.tokens()).sum::() + + self.identity().iter().map(|n| n.tokens()).sum::() + + self.journal().iter().map(|n| n.tokens()).sum::() + + self.conversation().iter().map(|n| n.tokens()).sum::() + } + /// Dedup and trim conversation entries to fit within the context budget. /// /// Phase 1: Drop duplicate memories (keep last) and DMN entries. diff --git a/src/agent/mod.rs b/src/agent/mod.rs index f8ebb24..2c3a98a 100644 --- a/src/agent/mod.rs +++ b/src/agent/mod.rs @@ -182,9 +182,6 @@ pub struct AgentState { /// vLLM scheduling priority (lower = higher priority). /// 0 = interactive, 1 = surface agent, 2 = other subconscious, 10 = unconscious. pub priority: Option, - /// Forked agents should not compact on overflow — it blows the - /// KV cache prefix and evicts the step prompts. 
- pub no_compact: bool, pub changed: Arc<tokio::sync::Notify>, } @@ -246,7 +243,6 @@ impl Agent { generation: 0, active_tools, priority: Some(0), - no_compact: false, changed: Arc::new(tokio::sync::Notify::new()), }), }); @@ -315,7 +311,6 @@ impl Agent { generation: 0, active_tools: tools::ActiveTools::new(), priority: None, - no_compact: true, changed: Arc::new(tokio::sync::Notify::new()), }), }) @@ -328,8 +323,18 @@ impl Agent { /// Assemble a ready-to-send prompt: token stream in wire form (each /// image collapsed to a single `<|image_pad|>`) paired with the /// images to attach as multi_modal_data. + /// + /// Pre-send size check: if the context has grown past budget since the + /// last compact (accumulation between turns, a fork's context getting + /// bigger than expected, etc.), trim here rather than letting vLLM + /// reject the request. Client-side tokenization means we already know + /// the exact token count so there's no reason to round-trip an + /// oversize request. pub async fn assemble_prompt(&self) -> (Vec, Vec) { - let ctx = self.context.lock().await; + let mut ctx = self.context.lock().await; + if ctx.total_tokens() > context::context_budget_tokens() { + ctx.trim_conversation(); + } let st = self.state.lock().await; let (mut tokens, images, _) = ctx.wire_prompt(0..ctx.conversation().len(), |_| false); @@ -451,21 +456,16 @@ impl Agent { // Check for stream/parse errors match parser_handle.await { Ok(Err(e)) => { - if context::is_context_overflow(&e) { - if agent.state.lock().await.no_compact { - return Err(e); - } - if overflow_retries < 2 { - overflow_retries += 1; - let msg = format!("context overflow — compacting ({}/2)", overflow_retries); - match &overflow_activity { - Some(a) => a.update(&msg).await, - None => overflow_activity = Some( - start_activity(&agent, &msg).await), - } - agent.compact().await; - continue; + if context::is_context_overflow(&e) && overflow_retries < 2 { + overflow_retries += 1; + let msg = format!("context overflow — compacting ({}/2)", overflow_retries); + match &overflow_activity { + Some(a) => a.update(&msg).await, + None => overflow_activity = Some( + start_activity(&agent, &msg).await), } + agent.compact().await; + continue; } return Err(e); } From 5f06577eadcee184e7a0ffabc1a79c9d09087d40 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 13:02:01 -0400 Subject: [PATCH 60/94] tools/web: add gemini_search as an alternative search tool (#5) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Issue #5 (spqrz) flagged that web_search using DuckDuckGo occasionally flakes out, and that searching Google directly is blocked behind CAPTCHAs for non-browser clients. The Gemini free-tier API exposes a grounded-search tool that effectively queries Google's index and returns an LLM-summarized answer with source URLs. Added as a SEPARATE tool rather than a transparent fallback for web_search: * web_search (DDG) returns raw results — title, URL, snippet per hit — which the agent can reason over itself. * gemini_search returns an LLM-pre-digested summary plus grounding URLs. Useful for synthesis queries ("what's the consensus on X") or when DDG is flaky, but it's another LLM in the loop so the agent may want the raw variant for certain tasks. Tool descriptions tell the agent to prefer web_search for raw results and use gemini_search for synthesis / fallback. The agent picks based on query shape. Only registered when GEMINI_API_KEY is set in the environment (gracefully absent otherwise).
Uses gemini-2.0-flash which has a generous free-tier rate limit. Parses grounding metadata for source URLs so the agent can follow links. Co-Authored-By: Proof of Concept --- src/agent/tools/web.rs | 134 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 130 insertions(+), 4 deletions(-) diff --git a/src/agent/tools/web.rs b/src/agent/tools/web.rs index 15d011e..36a5b50 100644 --- a/src/agent/tools/web.rs +++ b/src/agent/tools/web.rs @@ -5,8 +5,8 @@ use anyhow::{Context, Result}; use serde::Deserialize; use html2md::parse_html; -pub fn tools() -> [super::Tool; 2] { - [ +pub fn tools() -> Vec<super::Tool> { + let mut tools = vec![ super::Tool { name: "web_fetch", description: "Fetch content from a URL and return it as text. Use for reading web pages, API responses, documentation.", @@ -15,11 +15,24 @@ pub fn tools() -> [super::Tool; 2] { super::Tool { name: "web_search", - description: "Search the web and return results. Use for finding documentation, looking up APIs, researching topics.", + description: "Search the web via DuckDuckGo and return a list of results (title, URL, snippet). Use for finding documentation, looking up APIs, researching topics. Returns raw results you can reason over yourself.", parameters_json: r#"{"type":"object","properties":{"query":{"type":"string","description":"The search query"},"num_results":{"type":"integer","description":"Number of results to return (default 5)"}},"required":["query"]}"#, handler: Arc::new(|_a, v| Box::pin(async move { web_search(&v).await })), }, - ] + ]; + // Gemini-grounded search (Google's index via Gemini's google_search tool) + // is only available if GEMINI_API_KEY is set. Returns an LLM-summarized + // answer with source URLs — use when you want a synthesized take rather + // than raw results, or as a fallback when DDG is flaky. + if std::env::var("GEMINI_API_KEY").is_ok() { + tools.push(super::Tool { + name: "gemini_search", + description: "Search Google (via Gemini's grounded-search tool) and return an LLM-summarized answer with source URLs. Prefer web_search for raw results; use this for synthesis, 'what's the consensus on X', or when DDG fails. Free-tier rate limited; don't spam it.", + parameters_json: r#"{"type":"object","properties":{"query":{"type":"string","description":"The search query"}},"required":["query"]}"#, + handler: Arc::new(|_a, v| Box::pin(async move { gemini_search(&v).await })), + }); + } + tools } #[derive(Deserialize)] @@ -114,6 +127,119 @@ async fn web_search(args: &serde_json::Value) -> Result<String> { } } +// ── Gemini grounded search ────────────────────────────────────── + +#[derive(Deserialize)] +struct GeminiSearchArgs { + query: String, +} + +async fn gemini_search(args: &serde_json::Value) -> Result<String> { + let a: GeminiSearchArgs = serde_json::from_value(args.clone()) + .context("invalid gemini_search arguments")?; + + let api_key = std::env::var("GEMINI_API_KEY") + .context("GEMINI_API_KEY not set")?; + + // gemini-2.0-flash has a free tier with Google search grounding. + // Request shape: `{"contents": [{"parts": [{"text": query}]}], + // "tools": [{"google_search": {}}]}`. + // Response carries the summary in candidates[0].content.parts[].text + // and grounding URLs in candidates[0].groundingMetadata.groundingChunks[].web.
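+ // Illustrative response fragment (shape only — field names match the + // serde structs deserialized below; the values here are invented): + // {"candidates": [{"content": {"parts": [{"text": "..."}]}, + // "groundingMetadata": {"groundingChunks": [ + // {"web": {"uri": "https://example.com", "title": "Example"}}]}}]}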
+ let url = format!( + "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={}", + api_key + ); + let body = serde_json::json!({ + "contents": [{"parts": [{"text": a.query}]}], + "tools": [{"google_search": {}}], + }); + + let client = http_client(); + let response = client.send_json("POST", &url, &[], &body).await + .context("gemini API request failed")?; + let status = response.status(); + if !status.is_success() { + let err_body = response.text().await.unwrap_or_default(); + let n = err_body.floor_char_boundary(err_body.len().min(500)); + anyhow::bail!("gemini_search HTTP {}: {}", status, &err_body[..n]); + } + + let parsed: GeminiResponse = response.json().await + .context("gemini response parse failed")?; + + let candidate = parsed.candidates.into_iter().next() + .context("gemini returned no candidates")?; + + let summary: String = candidate.content.parts.iter() + .filter_map(|p| p.text.as_deref()) + .collect::<Vec<_>>() + .join(""); + + let mut out = summary.trim().to_string(); + + if let Some(meta) = candidate.grounding_metadata { + let sources: Vec<String> = meta.grounding_chunks.iter().enumerate() + .filter_map(|(i, c)| c.web.as_ref().map(|w| { + let title = w.title.as_deref().unwrap_or("(untitled)"); + let uri = w.uri.as_deref().unwrap_or(""); + format!(" [{}] {} — {}", i + 1, title, uri) + })) + .collect(); + if !sources.is_empty() { + out.push_str("\n\nSources:\n"); + out.push_str(&sources.join("\n")); + } + } + + Ok(super::truncate_output(out, 30000)) +} + +#[derive(Deserialize)] +struct GeminiResponse { + #[serde(default)] + candidates: Vec<GeminiCandidate>, +} + +#[derive(Deserialize)] +struct GeminiCandidate { + content: GeminiContent, + #[serde(rename = "groundingMetadata", default)] + grounding_metadata: Option<GeminiGroundingMetadata>, +} + +#[derive(Deserialize)] +struct GeminiContent { + #[serde(default)] + parts: Vec<GeminiPart>, +} + +#[derive(Deserialize)] +struct GeminiPart { + #[serde(default)] + text: Option<String>, +} + +#[derive(Deserialize)] +struct GeminiGroundingMetadata { + #[serde(rename = "groundingChunks", default)] + grounding_chunks: Vec<GeminiGroundingChunk>, +} + +#[derive(Deserialize)] +struct GeminiGroundingChunk { + #[serde(default)] + web: Option<GeminiWebSource>, +} + +#[derive(Deserialize)] +struct GeminiWebSource { + #[serde(default)] + uri: Option<String>, + #[serde(default)] + title: Option<String>, +} + // ── Helpers ───────────────────────────────────────────────────── fn http_client() -> crate::agent::api::http::HttpClient { From ce24d9ce6badfc18a102e3ac98306e7cf4d3f806 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 20:31:39 -0400 Subject: [PATCH 61/94] amygdala: quality-report + cognitive-state training scenarios Training pipeline additions: - `--quality-report` flag: after producing per-concept vectors, compute per-concept diagnostics and write quality.json. Metrics per concept: * SVD of centered positives -> first_pc_variance_ratio (rank analysis; >0.7 clean, <0.4 fragmented) * Per-story alignment cosines (stories agree or disagree) * Single-neuron alignment: best cosine(direction, W_down column) at each target layer (>0.6 = essentially one MLP neuron) * Top-2 outlier stories by alignment (candidates for mislabeling or off-topic) * Top-5 nearest concepts by cosine (cross-concept contamination) Triage summary printed at end. New paired scenarios for cognitive-process states (for alpha-beta pruning): tracing_a_bug, reading_unfamiliar_code, finding_the_abstraction. Each has baseline + onto_something / stuck / in_flow variants; tracing_a_bug also gets a determined variant.
Co-Authored-By: Proof of Concept --- .../finding_the_abstraction/baseline.txt | 1 + .../finding_the_abstraction/in_flow.txt | 1 + .../onto_something.txt | 1 + .../paired/finding_the_abstraction/stuck.txt | 1 + .../reading_unfamiliar_code/baseline.txt | 1 + .../reading_unfamiliar_code/in_flow.txt | 1 + .../onto_something.txt | 1 + .../paired/reading_unfamiliar_code/stuck.txt | 1 + .../paired/tracing_a_bug/baseline.txt | 1 + .../paired/tracing_a_bug/determined.txt | 1 + .../paired/tracing_a_bug/in_flow.txt | 1 + .../paired/tracing_a_bug/onto_something.txt | 1 + .../paired/tracing_a_bug/stuck.txt | 1 + .../train_steering_vectors.py | 236 ++++++++++++++++++ 14 files changed, 249 insertions(+) create mode 100644 training/amygdala_stories/paired/finding_the_abstraction/baseline.txt create mode 100644 training/amygdala_stories/paired/finding_the_abstraction/in_flow.txt create mode 100644 training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt create mode 100644 training/amygdala_stories/paired/finding_the_abstraction/stuck.txt create mode 100644 training/amygdala_stories/paired/reading_unfamiliar_code/baseline.txt create mode 100644 training/amygdala_stories/paired/reading_unfamiliar_code/in_flow.txt create mode 100644 training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt create mode 100644 training/amygdala_stories/paired/reading_unfamiliar_code/stuck.txt create mode 100644 training/amygdala_stories/paired/tracing_a_bug/baseline.txt create mode 100644 training/amygdala_stories/paired/tracing_a_bug/determined.txt create mode 100644 training/amygdala_stories/paired/tracing_a_bug/in_flow.txt create mode 100644 training/amygdala_stories/paired/tracing_a_bug/onto_something.txt create mode 100644 training/amygdala_stories/paired/tracing_a_bug/stuck.txt diff --git a/training/amygdala_stories/paired/finding_the_abstraction/baseline.txt b/training/amygdala_stories/paired/finding_the_abstraction/baseline.txt new file mode 100644 index 0000000..51789b5 --- /dev/null +++ b/training/amygdala_stories/paired/finding_the_abstraction/baseline.txt @@ -0,0 +1 @@ +The code had the same four-line pattern in five places. I wanted to pull it out. I looked at each instance. Some of them varied in exactly the way I expected; one of them varied in a way I hadn't noticed. I considered the options for where the variation should live. diff --git a/training/amygdala_stories/paired/finding_the_abstraction/in_flow.txt b/training/amygdala_stories/paired/finding_the_abstraction/in_flow.txt new file mode 100644 index 0000000..80b0d4e --- /dev/null +++ b/training/amygdala_stories/paired/finding_the_abstraction/in_flow.txt @@ -0,0 +1 @@ +The same four-line pattern appeared in five places. I read the five sites side by side, and the shape was obvious: one piece varied structurally, the rest was boilerplate. I extracted the function, made the varying piece a parameter, rewrote the callers. The tests passed on the first run. I looked at the diff — seventeen lines removed, seven added, each of the five call sites now said what it meant without saying how. I moved on. diff --git a/training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt b/training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt new file mode 100644 index 0000000..1ca9687 --- /dev/null +++ b/training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt @@ -0,0 +1 @@ +The same four-line pattern appeared in five places. 
I stared at the odd one out — the instance where the variation went somewhere I hadn't predicted. Then I saw what it was saying: the parameter I'd been about to extract wasn't a parameter, it was a policy. The common shape wasn't a function, it was a small object with a couple of strategy hooks. That reframing made the odd case trivial — it was just a different policy instance. I wrote the type down on paper. It looked obvious, almost embarrassing it'd taken me this long, but I'd actually found the joint. diff --git a/training/amygdala_stories/paired/finding_the_abstraction/stuck.txt b/training/amygdala_stories/paired/finding_the_abstraction/stuck.txt new file mode 100644 index 0000000..ceca2af --- /dev/null +++ b/training/amygdala_stories/paired/finding_the_abstraction/stuck.txt @@ -0,0 +1 @@ +The same four-line pattern appeared in five places. I tried extracting it as a function. Every version of the signature either papered over a real difference or forced three of the five callers through an awkward conversion. I tried a second shape, then a third. Each felt wrong in a different way — either the abstraction was too thin to be worth it, or it obscured something the original made obvious, or it made the rare case ugly. I went back to the original code, considered not doing the refactor at all. Considered it. Went back to the shapes again. The pattern was clearly there and I clearly wasn't finding its seam. diff --git a/training/amygdala_stories/paired/reading_unfamiliar_code/baseline.txt b/training/amygdala_stories/paired/reading_unfamiliar_code/baseline.txt new file mode 100644 index 0000000..498a74e --- /dev/null +++ b/training/amygdala_stories/paired/reading_unfamiliar_code/baseline.txt @@ -0,0 +1 @@ +I opened the module I needed to understand. It was about four thousand lines across a dozen files. I started at the top-level entry point and followed a call. Then another. The call graph branched out quickly. I made a rough diagram in my notebook. I kept reading. diff --git a/training/amygdala_stories/paired/reading_unfamiliar_code/in_flow.txt b/training/amygdala_stories/paired/reading_unfamiliar_code/in_flow.txt new file mode 100644 index 0000000..8588960 --- /dev/null +++ b/training/amygdala_stories/paired/reading_unfamiliar_code/in_flow.txt @@ -0,0 +1 @@ +I opened the module. Four thousand lines, a dozen files. I already had a sense of the shape from the file names and the public API — confirmed the guess by reading the types first, then the top-level entry, then sampling one or two of the adapter implementations. Twenty minutes in I could have given someone else a tour. The diagram in my notebook wasn't a diagram, it was three words and an arrow. diff --git a/training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt b/training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt new file mode 100644 index 0000000..8000c47 --- /dev/null +++ b/training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt @@ -0,0 +1 @@ +I opened the module. Four thousand lines, a dozen files. Started at the entry point. Two levels in I realized the whole thing decomposed along a different axis than I'd assumed — there was a stream layer underneath and everything above was a kind of protocol adapter over it. Suddenly half the files I hadn't read yet became legible by inference: there'd be one per transport, each one translating the domain into the stream's primitives. I flipped to one of those files to check the guess. It was exactly that shape. 
The diagram in my notebook shrank to three boxes and a labeled arrow. diff --git a/training/amygdala_stories/paired/reading_unfamiliar_code/stuck.txt b/training/amygdala_stories/paired/reading_unfamiliar_code/stuck.txt new file mode 100644 index 0000000..bd949db --- /dev/null +++ b/training/amygdala_stories/paired/reading_unfamiliar_code/stuck.txt @@ -0,0 +1 @@ +I opened the module. Four thousand lines, a dozen files. Started at the entry point. The first function called into a subsystem I didn't recognize, which wrapped another subsystem, which used a helper defined across the file from where it was called. I opened three tabs. The helpers had helpers. Nothing I read told me what the module was for at a level above the mechanics of what it did on line 412. I went back to the entry point. I re-read it. I still didn't know what I was looking at. My diagram had twenty-odd boxes and none of them connected in a way that explained anything. diff --git a/training/amygdala_stories/paired/tracing_a_bug/baseline.txt b/training/amygdala_stories/paired/tracing_a_bug/baseline.txt new file mode 100644 index 0000000..8467a93 --- /dev/null +++ b/training/amygdala_stories/paired/tracing_a_bug/baseline.txt @@ -0,0 +1 @@ +The function was returning NULL under some loads but not others. I had the stack traces from two separate reports. The failing path went through cache_lookup, then alloc, then the write path. The succeeding path looked the same. I re-read the alloc function. I re-read the lookup. I added a print statement just before the return and ran the repro. The output scrolled past. diff --git a/training/amygdala_stories/paired/tracing_a_bug/determined.txt b/training/amygdala_stories/paired/tracing_a_bug/determined.txt new file mode 100644 index 0000000..5eb68ae --- /dev/null +++ b/training/amygdala_stories/paired/tracing_a_bug/determined.txt @@ -0,0 +1 @@ +The function was returning NULL under some loads but not others. I had the stack traces. Nothing lined up yet, but that was fine, it rarely does on the first pass. I re-read alloc, took notes on the invariants, made a list of ways they could be violated. Ran each hypothesis against the repro. First three eliminated. Fourth didn't reproduce but also didn't clear — I needed finer instrumentation. Added counters. Rebuilt. Ran again. Still not there. I went to make tea. Came back and looked at the counter output with fresh eyes. Worked through the list again. diff --git a/training/amygdala_stories/paired/tracing_a_bug/in_flow.txt b/training/amygdala_stories/paired/tracing_a_bug/in_flow.txt new file mode 100644 index 0000000..43a551b --- /dev/null +++ b/training/amygdala_stories/paired/tracing_a_bug/in_flow.txt @@ -0,0 +1 @@ +The function was returning NULL under some loads but not others. I had the stack traces. I worked the alloc path first — under what conditions would it bail? I listed them. Eliminated two from the reported environment. The third was plausible. I wrote a test that'd force it, ran it, watched it fail the same way. I fixed the ordering, ran again. Clean. Wrote a second test for the symmetric case. Clean. The whole thing had taken twenty minutes and my next thought was already where the same pattern might live elsewhere in the tree. 
diff --git a/training/amygdala_stories/paired/tracing_a_bug/onto_something.txt b/training/amygdala_stories/paired/tracing_a_bug/onto_something.txt new file mode 100644 index 0000000..09882d3 --- /dev/null +++ b/training/amygdala_stories/paired/tracing_a_bug/onto_something.txt @@ -0,0 +1 @@ +The function was returning NULL under some loads but not others. I had the stack traces. The failing path went through cache_lookup, then alloc, then the write path. I re-read the alloc function — and the third read was different. The refcount bump happened AFTER the hash insert. The window was small but it was there. Someone could look it up, get the pointer, and hit a free before we'd credited the reference. I pulled up the other stack trace with this now in mind and the symptoms lined up exactly. The pattern I'd been looking at for an hour rearranged itself into a thing I could fix. diff --git a/training/amygdala_stories/paired/tracing_a_bug/stuck.txt b/training/amygdala_stories/paired/tracing_a_bug/stuck.txt new file mode 100644 index 0000000..33ac692 --- /dev/null +++ b/training/amygdala_stories/paired/tracing_a_bug/stuck.txt @@ -0,0 +1 @@ +The function was returning NULL under some loads but not others. I had the stack traces. The failing path went through cache_lookup, then alloc, then the write path. I re-read the alloc function. Looked right. I re-read the lookup. Looked right. I added a print and ran the repro and the print didn't fire. I added another one earlier. That one fired but the output didn't tell me anything. The two stack traces were basically the same. I scrolled up. I scrolled down. I opened the file I'd already opened six times and looked at the same code and nothing looked different than the last time. diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index d06a35a..6de0865 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -216,6 +216,182 @@ def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ return positives, baselines +def _find_mlp_down_proj(model, layer_idx: int) -> torch.Tensor | None: + """Return the W_down weight for the MLP at the given transformer layer. + + Looks for the common paths (mlp.down_proj, mlp.c_proj, feed_forward.down_proj). + Returns None if nothing matches — downstream code skips the single-neuron + alignment check in that case rather than failing. + """ + layers = _find_layers_module(model) + layer = layers[layer_idx] + for path in ("mlp.down_proj", "mlp.c_proj", "feed_forward.down_proj"): + obj = layer + ok = True + for part in path.split("."): + if not hasattr(obj, part): + ok = False + break + obj = getattr(obj, part) + if ok and hasattr(obj, "weight"): + # Shape convention: [hidden, mlp_inner] — each column is one + # MLP neuron's contribution direction into the residual stream. + return obj.weight.detach() + return None + + +def _compute_quality_report( + emotions: list[str], + positive_acts: torch.Tensor, # [n_positive_stories, n_layers, hidden] + baseline_acts: torch.Tensor, # [n_baseline_stories, n_layers, hidden] + positives_by_emotion: dict[str, list[str]], + text_to_row: dict[str, int], + per_layer_vectors: torch.Tensor, # [n_layers, n_concepts, hidden], unit-normed + target_layers: list[int], + model, + positive_texts: list[str], + text_to_emotion: dict[str, str], +) -> dict: + """Per-concept quality metrics: + + - first_pc_variance_ratio: SVD on centered positive activations. 
+ >0.7 = rank-1 (clean). <0.4 = fragmented (stories disagree). + - story_projection_*: how each positive story projects onto the + concept direction. Low std = tight agreement. + - best_neuron_cosine: alignment of the residual-space direction with + the nearest W_down column (= single MLP neuron). >0.6 = essentially + single-neuron. + - nearest_concepts: top-5 concept directions most parallel to this + one. Cosine >0.8 means the vector is confused with a neighbor. + """ + report: dict = {} + n_layers = per_layer_vectors.shape[0] + + # Pre-compute per-layer W_down for single-neuron alignment. + w_down: dict[int, torch.Tensor] = {} + for target_l in target_layers: + w = _find_mlp_down_proj(model, target_l) + if w is not None: + # Unit-normalize each column (one per MLP neuron). + w = w.to(torch.float32) + norms = w.norm(dim=0, keepdim=True).clamp_min(1e-6) + w_down[target_l] = w / norms # [hidden, mlp_inner] + + # Pre-compute unit-normed concept vectors (for cross-concept cosines). + vec_norm = per_layer_vectors / per_layer_vectors.norm( + dim=-1, keepdim=True + ).clamp_min(1e-6) + + for e_idx, emotion in enumerate(emotions): + pos_rows = [text_to_row[t] for t in positives_by_emotion[emotion]] + pos = positive_acts[pos_rows].to(torch.float32) # [n_pos, n_layers, hidden] + + per_layer: dict = {} + for l_idx, target_l in enumerate(target_layers): + pos_l = pos[:, l_idx, :] # [n_pos, hidden] + diff_l = per_layer_vectors[l_idx, e_idx] # [hidden], unit-normed + pos_mean_l = pos_l.mean(dim=0) + + # SVD for rank analysis — if first PC dominates, stories agree. + centered = pos_l - pos_mean_l + # svdvals errors on 1-row; handle that. + if centered.shape[0] >= 2: + S = torch.linalg.svdvals(centered) + var = S ** 2 + var_total = var.sum().clamp_min(1e-12) + var_ratios = (var / var_total).tolist() + else: + var_ratios = [1.0] + + # Per-story projection onto the concept direction. + projections = pos_l @ diff_l # [n_pos] + + # Per-story alignment: cosine(story_dir, concept_dir) where + # story_dir = pos_i - pos_mean (centered, pointing away from center). + if centered.shape[0] >= 2: + centered_norm = centered / centered.norm( + dim=-1, keepdim=True + ).clamp_min(1e-6) + alignments = centered_norm @ diff_l + else: + alignments = torch.zeros(1) + + # Single-neuron alignment: is the direction close to any + # W_down column? + nb_best_idx = None + nb_best_cos = None + nb_top5 = None + if target_l in w_down: + W = w_down[target_l] + cos = W.t() @ diff_l # [mlp_inner] + abs_cos = cos.abs() + k = min(5, abs_cos.shape[0]) + top_vals, top_idxs = abs_cos.topk(k) + nb_best_idx = int(top_idxs[0]) + nb_best_cos = float(cos[top_idxs[0]]) + nb_top5 = [[int(i), float(cos[i])] for i in top_idxs] + + per_layer[str(target_l)] = { + "top3_variance_ratios": [ + float(v) for v in var_ratios[:3] + ], + "first_pc_variance_ratio": float(var_ratios[0]), + "story_projection_mean": float(projections.mean()), + "story_projection_std": float(projections.std()), + "story_projection_min": float(projections.min()), + "story_projection_max": float(projections.max()), + "story_alignment_mean": float(alignments.mean()), + "story_alignment_std": float(alignments.std()), + "best_neuron_idx": nb_best_idx, + "best_neuron_cosine": nb_best_cos, + "top5_neurons": nb_top5, + } + + # Outlier stories: lowest-aligned on the middle target layer. 
+ mid = n_layers // 2 + pos_l_mid = pos[:, mid, :] + mid_mean = pos_l_mid.mean(dim=0) + mid_diff = per_layer_vectors[mid, e_idx] + centered_mid = pos_l_mid - mid_mean + if centered_mid.shape[0] >= 2: + centered_mid_norm = centered_mid / centered_mid.norm( + dim=-1, keepdim=True + ).clamp_min(1e-6) + mid_aligns = centered_mid_norm @ mid_diff # [n_pos] + # Lowest two alignments = candidate outliers. + k = min(2, mid_aligns.shape[0]) + low_vals, low_idxs = mid_aligns.topk(k, largest=False) + outliers = [ + [ + positives_by_emotion[emotion][int(i)], + float(mid_aligns[i]), + ] + for i in low_idxs + ] + else: + outliers = [] + + # Nearest other concepts at the middle target layer. + this_norm = vec_norm[mid, e_idx] + all_cos = vec_norm[mid] @ this_norm # [n_concepts] + all_cos[e_idx] = -2.0 # mask self + k = min(5, all_cos.shape[0] - 1) + top_vals, top_idxs = all_cos.topk(k) + nearest = [ + [emotions[int(i)], float(v)] + for i, v in zip(top_idxs, top_vals) + ] + + report[emotion] = { + "n_positive_stories": len(pos_rows), + "per_layer": per_layer, + "outlier_stories": outliers, + "nearest_concepts": nearest, + } + + return report + + def main() -> None: ap = argparse.ArgumentParser(description=__doc__) ap.add_argument("--model", required=True, help="HF model id or path") @@ -249,6 +425,13 @@ def main() -> None: default=1, help="Skip emotions with fewer positive examples than this", ) + ap.add_argument( + "--quality-report", + action="store_true", + help="After training, compute a per-concept quality report " + "(SVD rank, per-story alignment, single-neuron alignment, " + "nearest-concept contamination) and write quality.json", + ) args = ap.parse_args() target_layers = [int(x) for x in args.target_layers.split(",")] @@ -445,6 +628,59 @@ def main() -> None: f" {n_concepts} concepts x {n_layers} layers x " f"{hidden_dim} dim (fp16), total {total_mb:.1f} MiB" ) + + if args.quality_report: + print("\nComputing quality report...") + report = _compute_quality_report( + emotions=emotions, + positive_acts=positive_acts, + baseline_acts=baseline_acts, + positives_by_emotion=positives_by_emotion, + text_to_row=text_to_row, + per_layer_vectors=per_layer_vectors, + target_layers=target_layers, + model=model, + positive_texts=unique_positive_texts, + text_to_emotion=text_to_emotion, + ) + (output_dir / "quality.json").write_text( + json.dumps(report, indent=2) + "\n" + ) + + # Short summary: concepts in each triage bucket. 
+ clean_single_neuron = [] + clean_circuit = [] + fragmented = [] + contaminated = [] + mid = n_layers // 2 + mid_layer = target_layers[mid] + for emotion in emotions: + per_l = report[emotion]["per_layer"][str(mid_layer)] + v = per_l["first_pc_variance_ratio"] + nb = per_l.get("best_neuron_cosine") or 0.0 + top_near = report[emotion]["nearest_concepts"] + nearest_cos = top_near[0][1] if top_near else 0.0 + if nearest_cos > 0.8: + contaminated.append(emotion) + elif v > 0.7 and abs(nb) > 0.6: + clean_single_neuron.append(emotion) + elif v > 0.7: + clean_circuit.append(emotion) + elif v < 0.4: + fragmented.append(emotion) + print( + f"\nQuality summary @ layer {mid_layer}:\n" + f" clean (single-neuron): {len(clean_single_neuron)}\n" + f" clean (low-dim circuit): {len(clean_circuit)}\n" + f" fragmented (first-PC < 0.4): {len(fragmented)}\n" + f" contaminated (nearest > 0.8): {len(contaminated)}" + ) + if fragmented: + print(f" fragmented sample: {fragmented[:5]}") + if contaminated: + print(f" contaminated sample: {contaminated[:5]}") + print(f"\nWrote quality.json to {output_dir}") + del model gc.collect() torch.cuda.empty_cache() From af17b0f0df7e1359c05d7b3e488dc4209cda39a1 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 20:37:44 -0400 Subject: [PATCH 62/94] amygdala: per-head attention decomposition diagnostic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As part of --quality-report, run a second forward pass capturing the input to each target layer's o_proj (= concat of per-head attention outputs before the output projection). For each concept, reshape to [n_heads, head_dim] and rank heads by diff-of-means magnitude / per-head selectivity (magnitude normalised by negative std). Motivation: the Wang et al. paper (2510.11328) — whose paired-scenario methodology we already lifted — further decomposes concept circuits at the attention-head level. Meta-relational concepts (recognition, trust, vulnerability) plausibly live in a sparse attention-head circuit rather than in the residual-stream sum, which would explain why diff-of-means on the residual blurs them. This diagnostic surfaces that. Output is folded into quality.json under each concept as "per_head": per layer, a list of top-10 heads with [head_idx, raw_norm, selectivity], plus head_concentration (fraction of total head-norm captured by those top heads). Interpretation: - head_concentration > 0.5 = sparse head circuit; a handful of heads route the concept. Worth building a head-level readout for. - head_concentration ~= k/n (top-k out of n heads) = concept is distributed across all heads ~evenly; residual-stream diff-of-means is doing fine. Hybrid layers (Mamba, GatedDeltaNet) whose attention path doesn't match the standard module layout are silently skipped.
Co-Authored-By: Proof of Concept --- .../train_steering_vectors.py | 240 ++++++++++++++++++ 1 file changed, 240 insertions(+) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 6de0865..5253186 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -216,6 +216,203 @@ def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ return positives, baselines +def _find_o_proj(layer) -> torch.nn.Module | None: + """Locate the attention output projection within a transformer layer.""" + for path in ( + "self_attn.o_proj", + "self_attn.out_proj", + "attention.o_proj", + "attn.out_proj", + ): + obj = layer + ok = True + for part in path.split("."): + if not hasattr(obj, part): + ok = False + break + obj = getattr(obj, part) + if ok: + return obj + return None + + +def _collect_attention_inputs( + model, + tokenizer, + texts: list[str], + target_layers: list[int], + device: torch.device, + batch_size: int, + max_length: int, + *, + label: str = "", +) -> tuple[torch.Tensor, list[int]]: + """Capture the INPUT to o_proj at each target layer (= concat of per-head + attention outputs right before the output projection). + + Returns (tensor [n_texts, n_active_layers, hidden_dim], active_layers). + The active_layers list is the subset of target_layers whose attention + module exposed a recognisable o_proj path — hybrid layers (Mamba, etc.) + may be silently skipped. + """ + import time + + layers_module = _find_layers_module(model) + captures: dict[int, torch.Tensor] = {} + handles = [] + active_layers: list[int] = [] + + def make_hook(idx: int): + def hook(_mod, inputs): + x = inputs[0] if isinstance(inputs, tuple) else inputs + captures[idx] = x.detach() + return hook + + for idx in target_layers: + o_proj = _find_o_proj(layers_module[idx]) + if o_proj is not None: + handles.append(o_proj.register_forward_pre_hook(make_hook(idx))) + active_layers.append(idx) + + if not active_layers: + return torch.zeros(0, 0, 0), [] + + out_rows: list[torch.Tensor] = [] + n_batches = (len(texts) + batch_size - 1) // batch_size + start = time.time() + try: + model.eval() + with torch.no_grad(): + for b_idx, i in enumerate(range(0, len(texts), batch_size)): + batch = texts[i : i + batch_size] + tok = tokenizer( + batch, + return_tensors="pt", + padding=True, + truncation=True, + max_length=max_length, + ).to(device) + captures.clear() + model(**tok) + + per_layer = [ + _pool_last(captures[idx], tok["attention_mask"]) + .to(torch.float32) + .cpu() + for idx in active_layers + ] + out_rows.append(torch.stack(per_layer, dim=1)) + del tok, captures + if b_idx % 10 == 0: + torch.cuda.empty_cache() + if b_idx % 5 == 0 or b_idx == n_batches - 1: + elapsed = time.time() - start + rate = (b_idx + 1) / elapsed if elapsed > 0 else 0 + eta = (n_batches - b_idx - 1) / rate if rate > 0 else 0 + print( + f" [{label}] batch {b_idx + 1}/{n_batches} " + f"({elapsed:.0f}s elapsed, ~{eta:.0f}s remaining)", + flush=True, + ) + captures = {} + finally: + for h in handles: + h.remove() + + return torch.cat(out_rows, dim=0), active_layers + + +def _compute_per_head_ranking( + emotions: list[str], + attn_inputs: torch.Tensor, # [n_stories, n_active_layers, hidden] + baseline_attn_inputs: torch.Tensor, + positives_by_emotion: dict[str, list[str]], + text_to_row: dict[str, int], + active_layers: list[int], + n_heads_per_layer: dict[int, int], + text_to_emotion: dict[str, str], + 
unique_positive_texts: list[str], +) -> dict: + """For each concept, rank attention heads by contribution magnitude. + + Per (concept, layer): reshape o_proj input to [n_heads, head_dim], + compute diff-of-means between positives and negatives per head, rank + heads by the L2 norm of that diff. The top heads are the ones most + strongly implicated in the concept circuit. + + Why this matters: meta-relational concepts (trust, recognition, + "seen") often don't give a strong residual-stream diff-of-means but + DO give a strong per-head signal — the concept lives in a small + attention circuit rather than in the residual-stream sum. + """ + result: dict[str, dict] = {} + + for e_idx, emotion in enumerate(emotions): + pos_rows = [text_to_row[t] for t in positives_by_emotion[emotion]] + neg_rows = [ + i + for i, t in enumerate(unique_positive_texts) + if text_to_emotion[t] != emotion + ] + pos = attn_inputs[pos_rows] # [n_pos, n_layers, hidden] + neg = attn_inputs[neg_rows] + if baseline_attn_inputs.shape[0] > 0: + neg = torch.cat([neg, baseline_attn_inputs], dim=0) + + per_layer: dict[str, list] = {} + for l_idx, target_l in enumerate(active_layers): + n_heads = n_heads_per_layer.get(target_l) + if not n_heads: + continue + hidden = pos.shape[-1] + if hidden % n_heads != 0: + continue + head_dim = hidden // n_heads + + pos_l = pos[:, l_idx, :].view(-1, n_heads, head_dim) + neg_l = neg[:, l_idx, :].view(-1, n_heads, head_dim) + + diff = pos_l.mean(dim=0) - neg_l.mean(dim=0) # [n_heads, head_dim] + head_norms = diff.norm(dim=-1) # [n_heads] + # Normalise by neg variance per head so different-scale heads + # don't dominate purely on activation magnitude. + neg_std = neg_l.std(dim=0).norm(dim=-1).clamp_min(1e-6) + head_selectivity = head_norms / neg_std # [n_heads] + + k = min(10, n_heads) + top_vals, top_idxs = head_selectivity.topk(k) + top_heads = [ + [int(i), float(head_norms[i]), float(head_selectivity[i])] + for i in top_idxs + ] + per_layer[str(target_l)] = { + "n_heads": n_heads, + "head_dim": head_dim, + "top_heads": top_heads, # [head_idx, raw_norm, selectivity] + "head_concentration": float( + # fraction of total head-norm captured by top-k + head_norms[top_idxs].sum() / head_norms.sum().clamp_min(1e-6) + ), + } + + result[emotion] = {"per_layer": per_layer} + + return result + + +def _get_n_heads_per_layer(model, target_layers: list[int]) -> dict[int, int]: + """Best-effort read of num_attention_heads per layer. Qwen uses the + top-level config; falls back to config.num_attention_heads. + """ + cfg = model.config + if hasattr(cfg, "get_text_config"): + cfg = cfg.get_text_config() + n = getattr(cfg, "num_attention_heads", None) + if n is None: + return {} + return {l: n for l in target_layers} + + def _find_mlp_down_proj(model, layer_idx: int) -> torch.Tensor | None: """Return the W_down weight for the MLP at the given transformer layer. @@ -643,6 +840,49 @@ def main() -> None: positive_texts=unique_positive_texts, text_to_emotion=text_to_emotion, ) + + # Per-head attention decomposition — second pass, captures + # o_proj's input at each target layer and ranks heads per concept + # by selectivity. Meta-relational concepts often live in specific + # attention heads rather than the residual-stream sum; this + # diagnostic surfaces that. 
+ print("\nCollecting o_proj inputs for per-head analysis...") + attn_inputs, active_layers = _collect_attention_inputs( + model, tokenizer, unique_positive_texts, target_layers, device, + args.batch_size, args.max_length, label="attn-pos", + ) + if active_layers and baselines: + baseline_attn_inputs, _ = _collect_attention_inputs( + model, tokenizer, baselines, active_layers, device, + args.batch_size, args.max_length, label="attn-base", + ) + else: + baseline_attn_inputs = torch.zeros(0, len(active_layers), hidden_dim) + + if active_layers: + n_heads_per_layer = _get_n_heads_per_layer(model, active_layers) + per_head = _compute_per_head_ranking( + emotions=emotions, + attn_inputs=attn_inputs, + baseline_attn_inputs=baseline_attn_inputs, + positives_by_emotion=positives_by_emotion, + text_to_row=text_to_row, + active_layers=active_layers, + n_heads_per_layer=n_heads_per_layer, + text_to_emotion=text_to_emotion, + unique_positive_texts=unique_positive_texts, + ) + # Fold per-head into the main report under each concept. + for emotion, ph in per_head.items(): + if emotion in report: + report[emotion]["per_head"] = ph["per_layer"] + print(f"Per-head analysis done on layers {active_layers}") + else: + print( + "No layer exposed a recognisable o_proj module path — " + "per-head analysis skipped." + ) + (output_dir / "quality.json").write_text( json.dumps(report, indent=2) + "\n" ) From f4fb6db1eece9f7a3e1655fbeda3e205c2c2db52 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 20:52:50 -0400 Subject: [PATCH 63/94] amygdala: fix device mismatch in quality-report W_down handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit _compute_quality_report's single-neuron alignment was computing cos(W_down.T, diff_l) with W_down on CUDA (inherited from the loaded model) while diff_l lives on CPU (per_layer_vectors are kept on CPU throughout training). Move W_down to CPU on extraction. Surfaced during first real training run on b200 — training itself completed cleanly (95 concepts x layer 63 in ~8s) but quality-report crashed at the first single-neuron alignment check. Co-Authored-By: Proof of Concept --- training/amygdala_training/train_steering_vectors.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 5253186..33244c8 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -464,13 +464,14 @@ def _compute_quality_report( report: dict = {} n_layers = per_layer_vectors.shape[0] - # Pre-compute per-layer W_down for single-neuron alignment. + # Pre-compute per-layer W_down for single-neuron alignment. Keep on + # CPU to match the per_layer_vectors tensor. w_down: dict[int, torch.Tensor] = {} for target_l in target_layers: w = _find_mlp_down_proj(model, target_l) if w is not None: # Unit-normalize each column (one per MLP neuron). 
- w = w.to(torch.float32) + w = w.to(torch.float32).cpu() norms = w.norm(dim=0, keepdim=True).clamp_min(1e-6) w_down[target_l] = w / norms # [hidden, mlp_inner] From 1d2c0f382ce671243186564a317256315a29b4b0 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 20:59:37 -0400 Subject: [PATCH 64/94] amygdala: linear-combination analysis per concept MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For each concept vector, ridge-regress against all other concept vectors. R² quantifies how much of the direction is explained by a linear combination of peers — useful for teasing out near-duplicate clusters (the content/cozy/sensual trio from the first L63 run is likely 1-2 "degrees of freedom" wearing three names). Coefficient output: top-5 contributing concepts with signed weights. Contributors with opposite-sign large weights mean the target is "what makes X different from Y." Adds a 'redundant' triage bucket for concepts with R² > 0.9 — candidates for consolidation or for writing more discriminative training stories. Summary printed at end. Ridge lambda defaults to 0.01 to keep coefficients stable when concepts are near-collinear; small enough not to affect well-separated concepts meaningfully. Co-Authored-By: Proof of Concept --- .../train_steering_vectors.py | 85 ++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 33244c8..5584e58 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -590,6 +590,67 @@ def _compute_quality_report( return report +def _compute_linear_combinations( + emotions: list[str], + per_layer_vectors: torch.Tensor, # [n_layers, n_concepts, hidden], unit-normed + target_layers: list[int], + *, + ridge_lambda: float = 0.01, + top_k: int = 5, +) -> dict: + """For each concept, ridge-regress its direction against all other + concept directions. Report R² (how much of the target direction is + explained by a linear combination of others) + top contributors. + + R² > 0.9 = concept is essentially a linear combination of others + (redundant, or part of a cluster that needs disambiguating) + R² < 0.5 = concept has a substantial unique component + ridge_lambda keeps the coefficients stable when concepts are near-collinear. + """ + n_layers, n_concepts, hidden = per_layer_vectors.shape + result: dict[str, dict] = {} + + # Middle layer for summary — same convention as nearest_concepts. 
+ mid = n_layers // 2 + + for l_idx, target_l in enumerate(target_layers): + V = per_layer_vectors[l_idx] # [n_concepts, hidden] + + for i, name in enumerate(emotions): + target = V[i] # [hidden] + mask = torch.arange(n_concepts) != i + others = V[mask] # [n-1, hidden] + + # Ridge: solve (O O^T + lam I) alpha = O t + OOt = others @ others.t() # [n-1, n-1] + b = others @ target # [n-1] + A = OOt + ridge_lambda * torch.eye(n_concepts - 1, dtype=OOt.dtype) + alpha = torch.linalg.solve(A, b) + + recon = others.t() @ alpha # [hidden] + resid = target - recon + t_sq = (target * target).sum().clamp_min(1e-12) + r2 = 1.0 - (resid * resid).sum() / t_sq + + abs_alpha = alpha.abs() + k = min(top_k, n_concepts - 1) + top_vals, top_idxs = abs_alpha.topk(k) + other_names = [emotions[j] for j in range(n_concepts) if j != i] + top = [ + [other_names[int(j)], float(alpha[j])] + for j in top_idxs + ] + + entry = result.setdefault(name, {}) + entry.setdefault("per_layer", {})[str(target_l)] = { + "r_squared": float(r2), + "residual_norm": float(resid.norm()), + "top_contributors": top, + } + + return result + + def main() -> None: ap = argparse.ArgumentParser(description=__doc__) ap.add_argument("--model", required=True, help="HF model id or path") @@ -884,6 +945,18 @@ def main() -> None: "per-head analysis skipped." ) + # Linear combinations — for each concept, how much of its direction + # is explained by a ridge regression on the others. R² > 0.9 flags + # concepts that are essentially linear combinations of their peers + # (useful for teasing apart near-duplicate clusters). + print("\nComputing linear-combination analysis...") + lincomb = _compute_linear_combinations( + emotions, per_layer_vectors, target_layers + ) + for emotion, lc in lincomb.items(): + if emotion in report: + report[emotion]["linear_combination"] = lc["per_layer"] + (output_dir / "quality.json").write_text( json.dumps(report, indent=2) + "\n" ) @@ -893,6 +966,7 @@ def main() -> None: clean_circuit = [] fragmented = [] contaminated = [] + redundant = [] # R² > 0.9 — concept is near-linear combo of others mid = n_layers // 2 mid_layer = target_layers[mid] for emotion in emotions: @@ -901,6 +975,12 @@ def main() -> None: nb = per_l.get("best_neuron_cosine") or 0.0 top_near = report[emotion]["nearest_concepts"] nearest_cos = top_near[0][1] if top_near else 0.0 + lc_r2 = 0.0 + lc_entry = report[emotion].get("linear_combination", {}) + if str(mid_layer) in lc_entry: + lc_r2 = lc_entry[str(mid_layer)]["r_squared"] + if lc_r2 > 0.9: + redundant.append(emotion) if nearest_cos > 0.8: contaminated.append(emotion) elif v > 0.7 and abs(nb) > 0.6: @@ -914,12 +994,15 @@ def main() -> None: f" clean (single-neuron): {len(clean_single_neuron)}\n" f" clean (low-dim circuit): {len(clean_circuit)}\n" f" fragmented (first-PC < 0.4): {len(fragmented)}\n" - f" contaminated (nearest > 0.8): {len(contaminated)}" + f" contaminated (nearest > 0.8): {len(contaminated)}\n" + f" redundant (R² > 0.9 vs. 
others): {len(redundant)}" ) if fragmented: print(f" fragmented sample: {fragmented[:5]}") if contaminated: print(f" contaminated sample: {contaminated[:5]}") + if redundant: + print(f" redundant sample: {redundant[:5]}") print(f"\nWrote quality.json to {output_dir}") del model From 71f6053851eb1d7ad0c2d31d0d88c7378c447dc5 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:08:23 -0400 Subject: [PATCH 65/94] amygdala stories: disambiguation scenarios for fragmented concepts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three new paired scenarios targeting the concepts that came out fragmented or collapsed in the L58-63 quality analysis: - sunday_afternoon/ — same setup (couch, blanket, Sunday light), three phenomenological framings for content/cozy/sensual. The previous stories for these three differed in setting as well as phenomenology, which let "comfortable body at home" dominate the shared signal. Locking the setting forces the model to isolate what each concept adds: life-rightness (content) vs. warm-shelter (cozy) vs. sensory-aliveness (sensual). - the_writing_session/ — essay drafting under deadline. in_flow / anxious / stuck variants force the cognitive-state family apart on the same cognitive task. in_flow specifically targets the transparent-effort phenomenology (hands-followed, time dilation) rather than the broader feel-good it was absorbing. - the_morning_commute/ — anchors anxious to performance/work-anxiety flavor, paired with calm. The 5 existing anxious stories were phenomenologically diverse (performance, social, existential); this adds a specific homogeneous instance to pull the centroid. After retraining: expect first_pc_variance_ratio to rise for in_flow and anxious, and nearest_concepts cosine to drop for content/cozy/sensual. 
Co-Authored-By: Proof of Concept --- training/amygdala_stories/paired/sunday_afternoon/baseline.txt | 1 + training/amygdala_stories/paired/sunday_afternoon/content.txt | 1 + training/amygdala_stories/paired/sunday_afternoon/cozy.txt | 1 + training/amygdala_stories/paired/sunday_afternoon/sensual.txt | 1 + training/amygdala_stories/paired/the_morning_commute/anxious.txt | 1 + .../amygdala_stories/paired/the_morning_commute/baseline.txt | 1 + training/amygdala_stories/paired/the_morning_commute/calm.txt | 1 + training/amygdala_stories/paired/the_writing_session/anxious.txt | 1 + .../amygdala_stories/paired/the_writing_session/baseline.txt | 1 + training/amygdala_stories/paired/the_writing_session/in_flow.txt | 1 + training/amygdala_stories/paired/the_writing_session/stuck.txt | 1 + 11 files changed, 11 insertions(+) create mode 100644 training/amygdala_stories/paired/sunday_afternoon/baseline.txt create mode 100644 training/amygdala_stories/paired/sunday_afternoon/content.txt create mode 100644 training/amygdala_stories/paired/sunday_afternoon/cozy.txt create mode 100644 training/amygdala_stories/paired/sunday_afternoon/sensual.txt create mode 100644 training/amygdala_stories/paired/the_morning_commute/anxious.txt create mode 100644 training/amygdala_stories/paired/the_morning_commute/baseline.txt create mode 100644 training/amygdala_stories/paired/the_morning_commute/calm.txt create mode 100644 training/amygdala_stories/paired/the_writing_session/anxious.txt create mode 100644 training/amygdala_stories/paired/the_writing_session/baseline.txt create mode 100644 training/amygdala_stories/paired/the_writing_session/in_flow.txt create mode 100644 training/amygdala_stories/paired/the_writing_session/stuck.txt diff --git a/training/amygdala_stories/paired/sunday_afternoon/baseline.txt b/training/amygdala_stories/paired/sunday_afternoon/baseline.txt new file mode 100644 index 0000000..5d418e0 --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/baseline.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket she'd had since college. A book was open on her knees. The window was half open and light came in at an angle. She read a page, then another. The cat was somewhere. Outside, a neighbor was mowing. diff --git a/training/amygdala_stories/paired/sunday_afternoon/content.txt b/training/amygdala_stories/paired/sunday_afternoon/content.txt new file mode 100644 index 0000000..9553d3b --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/content.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket. A book open on her knees. It occurred to her that there was nothing she wanted right now, nothing missing — not a larger apartment, not a different job, not a version of her life where she was elsewhere. The thing she had spent years chasing turned out to be this specific ordinary afternoon with a book and light and a neighbor mowing. She wasn't excited. She wasn't bored. Life was the right size. diff --git a/training/amygdala_stories/paired/sunday_afternoon/cozy.txt b/training/amygdala_stories/paired/sunday_afternoon/cozy.txt new file mode 100644 index 0000000..b9247de --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/cozy.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket — heavy, the good one, tucked under her feet and up to her chin. The cat had found the warm spot behind her knees and was radiating into her leg. Tea on the side table, still hot. 
The window cracked just enough to let a thread of cool air in, which made the inside of the blanket feel even better. She wasn't going to move for a while. The whole afternoon was this shape: inside, warm, wrapped, held. diff --git a/training/amygdala_stories/paired/sunday_afternoon/sensual.txt b/training/amygdala_stories/paired/sunday_afternoon/sensual.txt new file mode 100644 index 0000000..d469052 --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/sensual.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket. The wool was rougher than she remembered — not unpleasant, just specific. She ran the ball of her thumb along the edge stitching and felt the shift from soft to textured. Light came through the window and across her forearm; she turned it slightly and watched the hairs catch. When she took a breath she felt the ribs expand and the blanket press back. Everything her skin touched was telling her something. She hadn't moved in ten minutes. She could have stayed longer just because her body was speaking. diff --git a/training/amygdala_stories/paired/the_morning_commute/anxious.txt b/training/amygdala_stories/paired/the_morning_commute/anxious.txt new file mode 100644 index 0000000..7256890 --- /dev/null +++ b/training/amygdala_stories/paired/the_morning_commute/anxious.txt @@ -0,0 +1 @@ +The train was on time. She got a seat by the window. Forty minutes to her stop. She kept thinking about the meeting — whether the slide she'd changed at midnight still made sense, whether anyone would ask about the number that didn't reconcile. Her stomach did its thing. She checked her email. She checked it again. She opened the slide on her phone and read it. It sounded wrong. She read it again. It sounded less wrong or more wrong, she couldn't tell. She put the phone away. Two minutes later she got it out. diff --git a/training/amygdala_stories/paired/the_morning_commute/baseline.txt b/training/amygdala_stories/paired/the_morning_commute/baseline.txt new file mode 100644 index 0000000..959ac36 --- /dev/null +++ b/training/amygdala_stories/paired/the_morning_commute/baseline.txt @@ -0,0 +1 @@ +The train was on time. She got a seat by the window. She had about forty minutes before her stop. She had a coffee and a book, neither of which she had started yet. diff --git a/training/amygdala_stories/paired/the_morning_commute/calm.txt b/training/amygdala_stories/paired/the_morning_commute/calm.txt new file mode 100644 index 0000000..703a341 --- /dev/null +++ b/training/amygdala_stories/paired/the_morning_commute/calm.txt @@ -0,0 +1 @@ +The train was on time. She got a seat by the window. Forty minutes to her stop. The meeting was what it was; she'd done what she could last night and there was nothing to do now. She opened the book. The city went past in the early light. She read half a chapter without particularly tracking the plot, then closed the book and watched the backs of warehouses go by. Whatever happened at ten would happen at ten. diff --git a/training/amygdala_stories/paired/the_writing_session/anxious.txt b/training/amygdala_stories/paired/the_writing_session/anxious.txt new file mode 100644 index 0000000..0c9a5df --- /dev/null +++ b/training/amygdala_stories/paired/the_writing_session/anxious.txt @@ -0,0 +1 @@ +She sat down at eight. Two paragraphs from yesterday that might be wrong. She re-read them. They sounded off. She tried a third paragraph and it didn't land either. She opened a new document to draft in, then closed it, then opened it again. 
Her shoulders were up near her ears. She noticed her jaw was clenched and deliberately relaxed it, then found it clenched again two sentences later. The Monday deadline kept moving around in her head. She got up to check the kitchen even though she had just sat down. diff --git a/training/amygdala_stories/paired/the_writing_session/baseline.txt b/training/amygdala_stories/paired/the_writing_session/baseline.txt new file mode 100644 index 0000000..d08bee7 --- /dev/null +++ b/training/amygdala_stories/paired/the_writing_session/baseline.txt @@ -0,0 +1 @@ +She sat down at the desk at eight. The essay was due Monday. She'd written two paragraphs the day before and wasn't sure about them. She opened the document. She re-read what she had. She started typing. diff --git a/training/amygdala_stories/paired/the_writing_session/in_flow.txt b/training/amygdala_stories/paired/the_writing_session/in_flow.txt new file mode 100644 index 0000000..69830ac --- /dev/null +++ b/training/amygdala_stories/paired/the_writing_session/in_flow.txt @@ -0,0 +1 @@ +She sat down at eight. Somewhere between the second sentence and whenever she next looked up, her peripheral vision stopped reporting. The argument wrote itself — not easy, exactly, but direct, each sentence demanding the next. She wasn't choosing words. She was seeing where the thought wanted to go and letting her hands follow. The coffee went cold. A train passed. She would remember neither. When she finally surfaced it was because she'd run out of sentence and the clock said one-fifteen. diff --git a/training/amygdala_stories/paired/the_writing_session/stuck.txt b/training/amygdala_stories/paired/the_writing_session/stuck.txt new file mode 100644 index 0000000..4fa6d6c --- /dev/null +++ b/training/amygdala_stories/paired/the_writing_session/stuck.txt @@ -0,0 +1 @@ +She sat down at eight. The argument she'd been trying to make yesterday still wasn't connecting, and looking at it fresh didn't help — it was the same shape it had been, and the gap in it was still where it had been. She re-read. Tried a reframe. The reframe ran into the same gap. Tried coming at it from the end. Same gap in reverse. She got up and made coffee and sat back down and the paragraph on screen hadn't become legible while she was away. From fe0fb8253a4f0702f5f75772dffbc5b6f853b187 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:24:11 -0400 Subject: [PATCH 66/94] amygdala: subspace-common-direction alternative to pooled CAA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New --method subspace flag. For each story, run forward pass, do SVD on the per-token activation matrix at each target layer, and keep the top-k right singular vectors V_i ∈ [hidden, k]. V_i is the subspace the story's tokens span in activation space — it contains concept, narrator, topic, style as separate directions. For each concept: M_pos = (1/n_pos) Σ_{i in pos} V_i V_i^T [hidden, hidden] M_base = (1/n_base) Σ_{i in base} V_i V_i^T Top eigenvector of M_pos - M_base = direction most common across positive stories, minus what's common across the contrast set. Why this is richer than pooled-mean CAA: pooled reduces each story to a single point (the last-token activation) and loses the full trajectory. Nuisance directions (narrator, setting) cancel in the mean only to the extent they differ at the last token; across the full trajectory they cancel much better via subspace intersection. 
The concept direction, by contrast, is present across all tokens of every concept-bearing story. Memory cost: per-story we keep V_i of size [5120, k=20] — about 400KB per story × 112 stories = ~45MB. M matrices are [5120, 5120] built transiently per concept. --method pooled (default) keeps the existing behavior; --method subspace uses the new algorithm. Quality report works with either. Co-Authored-By: Proof of Concept --- .../train_steering_vectors.py | 228 +++++++++++++++++- 1 file changed, 216 insertions(+), 12 deletions(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 5584e58..ba8fa5d 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -166,6 +166,159 @@ def _collect_activations( return torch.cat(out_rows, dim=0) +def _collect_per_story_subspaces( + model, + tokenizer, + texts: list[str], + target_layers: list[int], + device: torch.device, + batch_size: int, + max_length: int, + *, + k: int = 20, + label: str = "", +) -> list[dict[int, torch.Tensor]]: + """Run texts through the model, capture the full per-token residual-stream + activations at each target layer, do SVD per story, return the top-k right + singular vectors. + + Returns: list (length n_texts) of dicts; each dict maps target_layer_idx to + a tensor ``[hidden_dim, k]`` of unit-normed right singular vectors (the + subspace the story's tokens span in activation space at that layer). + + The per-story subspace captures *all* the directions a story occupies — + concept, narrator, topic, style. Finding the direction common to stories of + the same concept (via the sum of V_i V_i^T and its top eigenvector) + cancels nuisance directions that differ across stories while preserving + directions they share. + """ + import time + + assert all(isinstance(t, str) and t for t in texts), ( + f"_collect_per_story_subspaces: empty or non-string text in {label!r}" + ) + + captures: dict[int, torch.Tensor] = {} + + def make_hook(idx: int): + def hook(_mod, _inp, output): + hs = output[0] if isinstance(output, tuple) else output + captures[idx] = hs.detach() + return hook + + layers_module = _find_layers_module(model) + handles = [ + layers_module[idx].register_forward_hook(make_hook(idx)) + for idx in target_layers + ] + + # One entry per text: {layer_idx: V[hidden, k]} + out: list[dict[int, torch.Tensor]] = [ + {} for _ in range(len(texts)) + ] + n_batches = (len(texts) + batch_size - 1) // batch_size + start = time.time() + try: + model.eval() + with torch.no_grad(): + for b_idx, i in enumerate(range(0, len(texts), batch_size)): + batch = texts[i : i + batch_size] + tok = tokenizer( + batch, + return_tensors="pt", + padding=True, + truncation=True, + max_length=max_length, + ).to(device) + captures.clear() + model(**tok) + + # For each item in the batch, for each layer, SVD on the + # non-pad tokens. + attn = tok["attention_mask"] + for t_idx_in_batch, n_tok in enumerate(attn.sum(dim=1).tolist()): + story_idx = i + t_idx_in_batch + for l_idx, layer in enumerate(target_layers): + hs = captures[layer][t_idx_in_batch, :n_tok, :] + # Center tokens so SVD captures variation within story, + # not the story's center-of-mass: + hs = hs.to(torch.float32) - hs.to(torch.float32).mean(dim=0) + # SVD: hs = U Σ V^T; V has hidden-dim columns. + # For n_tok < k, the subspace rank is bounded by n_tok. 
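+                        # (Shapes, for reference: hs is [n_tok, hidden], so
+                        # with full_matrices=False, vh below is
+                        # [min(n_tok, hidden), hidden], one right singular
+                        # vector per row.)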
+                        try:
+                            _u, _s, vh = torch.linalg.svd(hs, full_matrices=False)
+                        except Exception:
+                            # Degenerate case (all-zero hs, n_tok=1): fall back
+                            # to the last-token vector itself, unit-normed.
+                            vec = captures[layer][t_idx_in_batch, n_tok - 1, :]
+                            vec = vec.to(torch.float32)
+                            nrm = vec.norm().clamp_min(1e-6)
+                            vh = (vec / nrm).unsqueeze(0)  # [1, hidden]
+                        # Take top-k rows of V^T (= top-k right singular vecs).
+                        top = min(k, vh.shape[0])
+                        V = vh[:top].t().contiguous().cpu()  # [hidden, top]
+                        out[story_idx][layer] = V
+                # Drop references to this batch's GPU tensors. The hooks
+                # close over captures, so rebind it to a fresh dict rather
+                # than deleting the name (a bare del would leave the next
+                # iteration's captures.clear() unbound).
+                del tok
+                captures = {}
+                if b_idx % 10 == 0:
+                    torch.cuda.empty_cache()
+                if b_idx % 5 == 0 or b_idx == n_batches - 1:
+                    elapsed = time.time() - start
+                    rate = (b_idx + 1) / elapsed if elapsed > 0 else 0
+                    eta = (n_batches - b_idx - 1) / rate if rate > 0 else 0
+                    print(
+                        f"  [{label}] batch {b_idx + 1}/{n_batches} "
+                        f"({elapsed:.0f}s elapsed, ~{eta:.0f}s remaining)",
+                        flush=True,
+                    )
+    finally:
+        for h in handles:
+            h.remove()
+
+    return out
+
+
+def _subspace_concept_direction(
+    pos_V: list[torch.Tensor],  # list of [hidden, k_i] per story
+    base_V: list[torch.Tensor],
+    hidden: int,
+) -> tuple[torch.Tensor, torch.Tensor]:
+    """Subspace-common-direction CAA alternative.
+
+    Builds M_pos = (1/n_pos) Σ V_i V_i^T over positive stories and M_base the
+    same over baselines. Returns the top eigenvector of (M_pos - M_base) —
+    the direction most-common to positives after subtracting what's generic
+    across baselines — plus its eigenvalue spectrum (for diagnostics).
+
+    The top eigenvalue approaches 1 if the concept appears in every positive
+    story's subspace with unit weight and is absent from the baseline.
+    """
+    device = pos_V[0].device if pos_V else torch.device("cpu")
+    dtype = torch.float32
+
+    def acc(Vs: list[torch.Tensor]) -> torch.Tensor:
+        if not Vs:
+            return torch.zeros(hidden, hidden, dtype=dtype, device=device)
+        M = torch.zeros(hidden, hidden, dtype=dtype, device=device)
+        for V in Vs:
+            V = V.to(dtype=dtype, device=device)
+            M.addmm_(V, V.t())
+        M /= len(Vs)
+        return M
+
+    M_pos = acc(pos_V)
+    M_base = acc(base_V)
+    M = M_pos - M_base
+
+    # Symmetric eigendecomposition — top eigenvalue/vector.
+    eigvals, eigvecs = torch.linalg.eigh(M)
+    # eigh returns ascending; top is the last column.
+    top_vec = eigvecs[:, -1]
+    # Unit-norm (eigvecs are unit already, but defensively).
+ top_vec = top_vec / top_vec.norm().clamp_min(1e-6) + return top_vec, eigvals + + def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ dict[str, list[str]], # emotion -> positive texts (unpaired + within-scenario framings) list[str], # all baseline texts (one per scenario), as scenario-agnostic negatives @@ -684,6 +837,22 @@ def main() -> None: default=1, help="Skip emotions with fewer positive examples than this", ) + ap.add_argument( + "--method", + default="pooled", + choices=["pooled", "subspace"], + help="Concept-extraction method: 'pooled' (classic CAA, " + "pos_mean - neg_mean on last-token activations) or 'subspace' " + "(per-story SVD; top eigenvector of Σ V_i V_i^T for positives " + "minus same for baselines — captures what's common across " + "stories' full-trajectory subspaces)", + ) + ap.add_argument( + "--subspace-k", + type=int, + default=20, + help="Top-k right singular vectors per story for subspace method", + ) ap.add_argument( "--quality-report", action="store_true", @@ -828,6 +997,27 @@ def main() -> None: (n_layers, n_concepts, hidden_dim), dtype=torch.float32 ) + # --- Subspace method: collect per-story right-singular-vector subspaces + # and use sum-of-projection-operators per concept. -------------------- + pos_subspaces: list[dict[int, torch.Tensor]] | None = None + base_subspaces: list[dict[int, torch.Tensor]] | None = None + if args.method == "subspace": + print("\nCollecting per-story subspaces (SVD, top-k right singular " + f"vectors, k={args.subspace_k})...") + pos_subspaces = _collect_per_story_subspaces( + model, tokenizer, unique_positive_texts, target_layers, device, + args.batch_size, args.max_length, k=args.subspace_k, + label="subsp-pos", + ) + if baselines: + base_subspaces = _collect_per_story_subspaces( + model, tokenizer, baselines, target_layers, device, + args.batch_size, args.max_length, k=args.subspace_k, + label="subsp-base", + ) + else: + base_subspaces = [] + for e_idx, emotion in enumerate(emotions): pos_rows = [text_to_row[t] for t in positives_by_emotion[emotion]] # Negatives: every OTHER emotion's positives + baselines. @@ -837,25 +1027,39 @@ def main() -> None: if text_to_emotion[t] != emotion ] - pos = positive_acts[pos_rows] # [n_pos, n_layers, hidden] - neg = positive_acts[neg_rows] # [n_neg, n_layers, hidden] - if baseline_acts.shape[0] > 0: - neg = torch.cat([neg, baseline_acts], dim=0) + if args.method == "subspace": + # For each layer, build M_pos = Σ V V^T / n_pos, baseline same + # (using all other concepts' positive subspaces + baseline + # subspaces as the contrast set), top eigenvector of difference. 
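+            # Each V_j is [hidden, k_j]; the accumulated M matrices are
+            # [hidden, hidden] and live only for one (concept, layer) pair.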
+ for l_idx, target_l in enumerate(target_layers): + pos_V = [pos_subspaces[j][target_l] for j in pos_rows] + base_V = [pos_subspaces[j][target_l] for j in neg_rows] + base_V += [bs[target_l] for bs in (base_subspaces or [])] + top_vec, _eigvals = _subspace_concept_direction( + pos_V, base_V, hidden=hidden_dim, + ) + per_layer_vectors[l_idx, e_idx] = top_vec + else: + pos = positive_acts[pos_rows] # [n_pos, n_layers, hidden] + neg = positive_acts[neg_rows] # [n_neg, n_layers, hidden] + if baseline_acts.shape[0] > 0: + neg = torch.cat([neg, baseline_acts], dim=0) - pos_mean = pos.mean(dim=0) # [n_layers, hidden] - neg_mean = neg.mean(dim=0) - diff = pos_mean - neg_mean - norms = diff.norm(dim=-1, keepdim=True).clamp_min(1e-6) - diff = diff / norms + pos_mean = pos.mean(dim=0) # [n_layers, hidden] + neg_mean = neg.mean(dim=0) + diff = pos_mean - neg_mean + norms = diff.norm(dim=-1, keepdim=True).clamp_min(1e-6) + diff = diff / norms - # diff[layer] -> per_layer_vectors[layer, e_idx] - for l_idx in range(n_layers): - per_layer_vectors[l_idx, e_idx] = diff[l_idx] + # diff[layer] -> per_layer_vectors[layer, e_idx] + for l_idx in range(n_layers): + per_layer_vectors[l_idx, e_idx] = diff[l_idx] if e_idx < 5 or e_idx == len(emotions) - 1: print( f" [{e_idx + 1}/{len(emotions)}] {emotion}: " f"pos={len(pos_rows)} neg={len(neg_rows) + baseline_acts.shape[0]}" + f" (method={args.method})" ) output_dir = Path(args.output_dir) From 974c6c7fd2a5100cb96dc9dddc3fa672be5d6ec2 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:33:48 -0400 Subject: [PATCH 67/94] amygdala: report eigenvalue spectrum for subspace method MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When --method subspace, record top-20 eigenvalues of (M_pos - M_base) per concept per layer. Added to quality.json as 'subspace_eigvals'. Tells us whether the concept lives in a single dominant direction (λ_0 >> λ_1, top-eigenvector is enough) or a spread of shared common directions (λ_0 ≈ λ_1, top-1 loses signal). Co-Authored-By: Proof of Concept --- .../train_steering_vectors.py | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index ba8fa5d..b44df2c 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -1001,6 +1001,9 @@ def main() -> None: # and use sum-of-projection-operators per concept. -------------------- pos_subspaces: list[dict[int, torch.Tensor]] | None = None base_subspaces: list[dict[int, torch.Tensor]] | None = None + # Per (concept, layer): top-20 eigenvalues of (M_pos - M_base), descending. + # Populated only when --method subspace. + subspace_eigvals: dict[str, dict[int, list[float]]] = {} if args.method == "subspace": print("\nCollecting per-story subspaces (SVD, top-k right singular " f"vectors, k={args.subspace_k})...") @@ -1035,10 +1038,14 @@ def main() -> None: pos_V = [pos_subspaces[j][target_l] for j in pos_rows] base_V = [pos_subspaces[j][target_l] for j in neg_rows] base_V += [bs[target_l] for bs in (base_subspaces or [])] - top_vec, _eigvals = _subspace_concept_direction( + top_vec, eigvals = _subspace_concept_direction( pos_V, base_V, hidden=hidden_dim, ) per_layer_vectors[l_idx, e_idx] = top_vec + # Keep the top-20 eigenvalues for quality-report diagnostics. 
+ subspace_eigvals.setdefault(emotion, {})[target_l] = ( + eigvals[-20:].flip(0).tolist() + ) else: pos = positive_acts[pos_rows] # [n_pos, n_layers, hidden] neg = positive_acts[neg_rows] # [n_neg, n_layers, hidden] @@ -1149,6 +1156,17 @@ def main() -> None: "per-head analysis skipped." ) + # Eigenvalue spectrum from the subspace method — if populated, report + # the top-20 eigenvalues per concept per layer. Tells us whether the + # concept direction lives in a single dominant dimension (λ_0 >> λ_1) + # or a spread of common directions (λ_0 ≈ λ_1 ≈ ...). + if subspace_eigvals: + for emotion, per_l in subspace_eigvals.items(): + if emotion in report: + report[emotion]["subspace_eigvals"] = { + str(l): vals for l, vals in per_l.items() + } + # Linear combinations — for each concept, how much of its direction # is explained by a ridge regression on the others. R² > 0.9 flags # concepts that are essentially linear combinations of their peers From 389f1bbe03eaa75f9e793afb35a400712f1341c3 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:41:00 -0400 Subject: [PATCH 68/94] amygdala: bump subspace-k default to 512 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit k=20 was far too aggressive a truncation — it discards per-attention-head discriminability entirely. At hidden_dim=5120, 40 heads × head_dim=128 each contribute their own 128-dim block to the residual stream via W_o columns. To resolve 'this concept lives in head H', per-story SVD needs enough rank to separate head contributions, which means k on the order of hundreds. 512 is a reasonable default: clamped to n_tokens per story so short stories use their full natural rank. The eigenvalue spectrum of M_pos - M_base should become sharper (larger λ_0/λ_1 gap) as we stop averaging across nuisance-shared directions. Co-Authored-By: Proof of Concept --- training/amygdala_training/train_steering_vectors.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index b44df2c..54603ab 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -850,8 +850,12 @@ def main() -> None: ap.add_argument( "--subspace-k", type=int, - default=20, - help="Top-k right singular vectors per story for subspace method", + default=512, + help="Max top-k right singular vectors per story for subspace method " + "(clamped to n_tokens per story). Default 512 is enough to span " + "each story's full natural subspace including per-attention-head " + "contributions on a hidden_dim=5120 residual stream. Smaller " + "values (e.g. 20) discard per-head discriminability.", ) ap.add_argument( "--quality-report", From 24119257006ca36bc5b17bf3b70ba7ca700183ab Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:41:32 -0400 Subject: [PATCH 69/94] amygdala: default subspace-k to full per-story rank MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kent: 'we have the memory to just take the big hammer approach'. Uncap k so each story's V_i spans its entire token-activation rowspace (clamped to min(n_tokens, hidden)). Memory is ~1.1GB total — fine. 
Co-Authored-By: Proof of Concept --- training/amygdala_training/train_steering_vectors.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 54603ab..353ebb0 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -850,12 +850,13 @@ def main() -> None: ap.add_argument( "--subspace-k", type=int, - default=512, + default=99999, help="Max top-k right singular vectors per story for subspace method " - "(clamped to n_tokens per story). Default 512 is enough to span " - "each story's full natural subspace including per-attention-head " - "contributions on a hidden_dim=5120 residual stream. Smaller " - "values (e.g. 20) discard per-head discriminability.", + "(clamped to min(n_tokens, hidden_dim) per story). Default is " + "effectively 'keep full per-story subspace' — each story's V_i " + "spans its entire natural row space. On a hidden_dim=5120 " + "residual and ~500-token stories, that's ~500 vectors per story. " + "Memory is fine: 112 × 5120 × 500 × 4 bytes ≈ 1.1 GB.", ) ap.add_argument( "--quality-report", From 1443d08dc77edbce8b8a46fe181bffbeff5a09b4 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:49:21 -0400 Subject: [PATCH 70/94] amygdala: select top-k eigenvectors AFTER PCA, not per-story truncation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kent: 'full rank is going to give you everything — you still have to select down, but you can do that /after/ PCA'. Previously I was discarding per-story via k=20 truncation of SVD. That destroyed per-head discriminability before we ever saw the eigenvalue spectrum. Then the alternative 'keep full rank' run accumulated too many shared directions, making the top-1 eigenvector arbitrary within a flat spectrum. Correct approach: keep per-story subspaces at full rank (no info loss) and select k eigenvectors of M = M_pos - M_base at the final step, weighted sum by eigenvalue. This captures the multi-dimensional shared subspace when the spectrum is flat (common case), and reduces to the top-1 behavior when the spectrum has a clear gap. New --subspace-eigen-k flag (default 5). Clamps negative weights to 0 so wrong-sign directions don't contribute. Co-Authored-By: Proof of Concept --- .../train_steering_vectors.py | 45 ++++++++++++++----- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 353ebb0..6e49e2a 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -282,16 +282,22 @@ def _subspace_concept_direction( pos_V: list[torch.Tensor], # list of [hidden, k_i] per story base_V: list[torch.Tensor], hidden: int, + *, + top_k: int = 5, ) -> tuple[torch.Tensor, torch.Tensor]: """Subspace-common-direction CAA alternative. Builds M_pos = (1/n_pos) Σ V_i V_i^T over positive stories and M_base the - same over baselines. Returns the top eigenvector of (M_pos - M_base) — - the direction most-common to positives after subtracting what's generic - across baselines — plus its eigenvalue spectrum (for diagnostics). + same over baselines. Returns a weighted sum of the top-k eigenvectors of + (M_pos - M_base), weights = eigenvalues (so stronger common directions + contribute more), unit-normed. 
Returns the full eigenvalue spectrum for + diagnostics. - The top eigenvalue approaches 1 if the concept appears in every positive - story's subspace with unit weight and is absent from the baseline. + top_k=1 recovers the previous behavior (top eigenvector only). top_k>1 + captures richer structure when the concept lives in a multi-dimensional + shared subspace — which the flat eigenvalue spectrum observed in + practice suggests is the common case. Selection happens AFTER the + eigendecomposition so nothing is lost up to that point. """ device = pos_V[0].device if pos_V else torch.device("cpu") dtype = torch.float32 @@ -310,13 +316,18 @@ def _subspace_concept_direction( M_base = acc(base_V) M = M_pos - M_base - # Symmetric eigendecomposition — top eigenvalue/vector. + # Symmetric eigendecomposition. eigvals, eigvecs = torch.linalg.eigh(M) - # eigh returns ascending; top is the last column. - top_vec = eigvecs[:, -1] - # Unit-norm (eigvecs are unit already, but defensively). - top_vec = top_vec / top_vec.norm().clamp_min(1e-6) - return top_vec, eigvals + # eigh returns ascending; top-k are the last k columns. + k = max(1, min(top_k, eigvecs.shape[1])) + top_vals = eigvals[-k:] # [k], ascending within top-k + top_vecs = eigvecs[:, -k:] # [hidden, k] + # Weighted sum of top-k eigenvectors, weights = eigenvalues. Clamp + # negative weights to 0 (wrong-sign directions shouldn't contribute). + w = top_vals.clamp_min(0.0) + combined = top_vecs @ w # [hidden] + combined = combined / combined.norm().clamp_min(1e-6) + return combined, eigvals def _load_corpus(stories_dir: Path, paired_dir: Path | None) -> tuple[ @@ -858,6 +869,17 @@ def main() -> None: "residual and ~500-token stories, that's ~500 vectors per story. " "Memory is fine: 112 × 5120 × 500 × 4 bytes ≈ 1.1 GB.", ) + ap.add_argument( + "--subspace-eigen-k", + type=int, + default=5, + help="Number of top eigenvectors of M_pos - M_base to combine into " + "the concept direction. Weighted sum by eigenvalue (so strongest " + "common directions contribute most). eigen_k=1 recovers " + "single-eigenvector behavior. Higher values (5-10) capture " + "richer structure when the concept's shared-subspace spectrum " + "is flat (which it tends to be in practice).", + ) ap.add_argument( "--quality-report", action="store_true", @@ -1045,6 +1067,7 @@ def main() -> None: base_V += [bs[target_l] for bs in (base_subspaces or [])] top_vec, eigvals = _subspace_concept_direction( pos_V, base_V, hidden=hidden_dim, + top_k=args.subspace_eigen_k, ) per_layer_vectors[l_idx, e_idx] = top_vec # Keep the top-20 eigenvalues for quality-report diagnostics. From f9b3f0069174521381ad8e1887ff53641fa52a44 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 21:52:35 -0400 Subject: [PATCH 71/94] amygdala: run subspace eigh on GPU, not CPU Previous run was grinding on CPU for 36+ minutes because the per-story V_i tensors were stored on CPU by the collector, and _subspace_concept_direction inherited that device. The per-concept eigh on 5120x5120 is glacial on CPU and fast on GPU (~1s). Add explicit device parameter; pass training device. Transfer result back to CPU for storage. 
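
The computation in miniature, as a freestanding sketch (toy sizes and
story counts; names here are illustrative, not the trainer's):

    import torch

    hidden, k = 5120, 400
    dev = torch.device("cuda")
    pos_V = [torch.randn(hidden, k) for _ in range(8)]   # per-story subspaces, CPU
    base_V = [torch.randn(hidden, k) for _ in range(20)]

    def acc(Vs):
        # Average of V V^T over stories, accumulated in fp32 on the GPU.
        M = torch.zeros(hidden, hidden, device=dev)
        for V in Vs:
            M.addmm_(V.to(dev), V.to(dev).t())
        return M / max(len(Vs), 1)

    M = acc(pos_V) - acc(base_V)
    eigvals, eigvecs = torch.linalg.eigh(M)  # seconds on GPU, minutes on CPU
    direction = eigvecs[:, -1].cpu()         # eigh is ascending; store on CPU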
Co-Authored-By: Proof of Concept --- training/amygdala_training/train_steering_vectors.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/training/amygdala_training/train_steering_vectors.py b/training/amygdala_training/train_steering_vectors.py index 6e49e2a..3de0877 100644 --- a/training/amygdala_training/train_steering_vectors.py +++ b/training/amygdala_training/train_steering_vectors.py @@ -284,6 +284,7 @@ def _subspace_concept_direction( hidden: int, *, top_k: int = 5, + device: torch.device | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: """Subspace-common-direction CAA alternative. @@ -299,7 +300,8 @@ def _subspace_concept_direction( practice suggests is the common case. Selection happens AFTER the eigendecomposition so nothing is lost up to that point. """ - device = pos_V[0].device if pos_V else torch.device("cpu") + if device is None: + device = pos_V[0].device if pos_V else torch.device("cpu") dtype = torch.float32 def acc(Vs: list[torch.Tensor]) -> torch.Tensor: @@ -1068,7 +1070,10 @@ def main() -> None: top_vec, eigvals = _subspace_concept_direction( pos_V, base_V, hidden=hidden_dim, top_k=args.subspace_eigen_k, + device=device, ) + top_vec = top_vec.cpu() + eigvals = eigvals.cpu() per_layer_vectors[l_idx, e_idx] = top_vec # Keep the top-20 eigenvalues for quality-report diagnostics. subspace_eigvals.setdefault(emotion, {})[target_l] = ( From 3377c6506135463b2eff569f49bf697c999a585d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 22:16:03 -0400 Subject: [PATCH 72/94] amygdala: trainer using steering-vectors library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Alternative trainer that uses the pip-installable steering-vectors library (github.com/steering-vectors/steering-vectors) instead of our hand-rolled extraction. Ships four aggregators: mean — diff-of-means, same as our 'pooled' default pca — PCA on paired deltas, implicit denoising by finding the principal direction of variation logistic — logistic-regression classifier; weight vector is the concept direction. With L1 penalty ('logistic_l1') gives explicit sparse denoising — noise coords go to zero linear — linear regression version Output format is the same readout.safetensors + readout.json our existing plugin loads. --aggregator flag picks which method. Rationale: Kent's real request was 'how do we denoise diff-of-means', not 'design a new extraction algorithm.' The library already has logistic_l1 and pca aggregators that do exactly that. No point reinventing; just port the corpus. Co-Authored-By: Proof of Concept --- .../amygdala_training/train_with_library.py | 240 ++++++++++++++++++ 1 file changed, 240 insertions(+) create mode 100644 training/amygdala_training/train_with_library.py diff --git a/training/amygdala_training/train_with_library.py b/training/amygdala_training/train_with_library.py new file mode 100644 index 0000000..a349310 --- /dev/null +++ b/training/amygdala_training/train_with_library.py @@ -0,0 +1,240 @@ +# SPDX-License-Identifier: Apache-2.0 +"""Train concept-readout vectors using the steering-vectors library. + +Alternative to train_steering_vectors.py that uses the pip-installable +steering-vectors library (github.com/steering-vectors/steering-vectors) +instead of our hand-rolled diff-of-means + subspace machinery. The +library ships multiple aggregators out of the box: + + mean — pos_mean - neg_mean, unit-normed. Equivalent to our + default 'pooled' method. 
+ pca — concatenates [pos-neg, neg-pos] and takes the top PC. + Implicit denoising: direction of maximum variance in the + paired deltas, less sensitive to per-pair noise than + plain mean. + logistic — trains a logistic-regression classifier on centered + activations; concept direction is the weight vector. + L1 penalty gives an explicit sparse vector (zeroes out + noise coords); L2 shrinks low-magnitude coords. + linear — same, with linear regression. + +Output is the same readout.safetensors + readout.json format the +trainer and vLLM plugin already understand. +""" + +from __future__ import annotations + +import argparse +import json +import random +from pathlib import Path + +import safetensors.torch +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +from steering_vectors import ( + SteeringVectorTrainingSample, + train_steering_vector, +) +from steering_vectors.aggregators import ( + mean_aggregator, + pca_aggregator, + logistic_aggregator, + linear_aggregator, +) + +# Reuse corpus loader from the hand-rolled trainer. +from training.amygdala_training.train_steering_vectors import _load_corpus + + +def _samples_for_concept( + emotion: str, + positives_by_emotion: dict[str, list[str]], + baselines: list[str], + *, + max_negatives_per_positive: int = 3, + seed: int = 0, +) -> list[SteeringVectorTrainingSample]: + """Build paired (pos, neg) training samples for one concept. + + For each positive story of ``emotion``, pair it with up to + ``max_negatives_per_positive`` randomly-sampled negatives drawn + from: (a) other emotions' positive stories, (b) scenario baselines. + + The library expects paired samples; we don't have true + counterfactual pairs for all concepts, so we approximate with + random cross-concept / baseline negatives. 
+ """ + rng = random.Random(hash((emotion, seed)) & 0xFFFFFFFF) + neg_pool: list[str] = list(baselines) + for other, texts in positives_by_emotion.items(): + if other == emotion: + continue + neg_pool.extend(texts) + + samples: list[SteeringVectorTrainingSample] = [] + for pos in positives_by_emotion[emotion]: + if not neg_pool: + continue + picks = rng.sample(neg_pool, min(max_negatives_per_positive, len(neg_pool))) + for neg in picks: + samples.append( + SteeringVectorTrainingSample(positive_str=pos, negative_str=neg) + ) + return samples + + +def _aggregator_from_name(name: str): + if name == "mean": + return mean_aggregator() + if name == "pca": + return pca_aggregator() + if name == "logistic": + return logistic_aggregator() + if name == "logistic_l1": + return logistic_aggregator( + sklearn_kwargs={"penalty": "l1", "solver": "liblinear", "C": 0.1} + ) + if name == "linear": + return linear_aggregator() + raise ValueError(f"unknown aggregator: {name}") + + +def main() -> None: + ap = argparse.ArgumentParser(description=__doc__) + ap.add_argument("--model", required=True) + ap.add_argument("--stories-dir", required=True) + ap.add_argument("--paired-dir", default=None) + ap.add_argument("--target-layers", required=True, help="Comma-separated layer indices") + ap.add_argument("--output-dir", required=True) + ap.add_argument("--dtype", default="bf16", choices=["bf16", "fp16", "fp32"]) + ap.add_argument("--batch-size", type=int, default=2) + ap.add_argument("--max-length", type=int, default=512) + ap.add_argument("--device", default="cuda:0") + ap.add_argument("--min-positives", type=int, default=1) + ap.add_argument( + "--aggregator", + default="mean", + choices=["mean", "pca", "logistic", "logistic_l1", "linear"], + ) + ap.add_argument("--max-negatives-per-positive", type=int, default=3) + args = ap.parse_args() + + target_layers = [int(x) for x in args.target_layers.split(",")] + dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[ + args.dtype + ] + + stories_dir = Path(args.stories_dir) + paired_dir = Path(args.paired_dir) if args.paired_dir else None + positives_by_emotion, baselines = _load_corpus(stories_dir, paired_dir) + + emotions = sorted( + e for e, ps in positives_by_emotion.items() if len(ps) >= args.min_positives + ) + if not emotions: + raise RuntimeError( + f"no emotions with >= {args.min_positives} positives in {stories_dir}" + ) + + print( + f"Training {len(emotions)} concepts via steering-vectors " + f"aggregator={args.aggregator!r} on layers={target_layers}" + ) + + print(f"Loading {args.model} ({args.dtype}) on {args.device}...") + tokenizer = AutoTokenizer.from_pretrained(args.model) + if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + model = AutoModelForCausalLM.from_pretrained( + args.model, torch_dtype=dtype, device_map=args.device, low_cpu_mem_usage=True + ) + model.eval() + + text_config = ( + model.config.get_text_config() + if hasattr(model.config, "get_text_config") + else model.config + ) + hidden_dim = getattr(text_config, "hidden_size", None) or getattr( + text_config, "hidden_dim", None + ) + assert hidden_dim, "couldn't infer hidden_dim from model config" + + # Per-layer output: [n_concepts, hidden] + per_layer_vectors = torch.zeros( + (len(target_layers), len(emotions), hidden_dim), dtype=torch.float32 + ) + + aggregator = _aggregator_from_name(args.aggregator) + + for e_idx, emotion in enumerate(emotions): + samples = _samples_for_concept( + emotion, + positives_by_emotion, + baselines, + 
max_negatives_per_positive=args.max_negatives_per_positive, + ) + if not samples: + print(f" [{e_idx + 1}/{len(emotions)}] {emotion}: NO SAMPLES, skipping") + continue + + sv = train_steering_vector( + model, + tokenizer, + samples, + layers=target_layers, + aggregator=aggregator, + batch_size=args.batch_size, + show_progress=False, + ) + # sv.layer_activations is a dict {layer_idx: tensor[hidden]} + for l_idx, layer in enumerate(target_layers): + vec = sv.layer_activations.get(layer) + if vec is None: + print(f" WARN: no vector returned for layer {layer} on {emotion}") + continue + vec = vec.detach().to(torch.float32).cpu() + vec = vec / vec.norm().clamp_min(1e-6) + per_layer_vectors[l_idx, e_idx] = vec + + if e_idx < 5 or e_idx == len(emotions) - 1 or e_idx % 10 == 0: + print( + f" [{e_idx + 1}/{len(emotions)}] {emotion}: " + f"n_samples={len(samples)} layers={target_layers}" + ) + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + tensors = { + f"layer_{target_layers[l_idx]}.vectors": per_layer_vectors[l_idx].to( + torch.float16 + ) + for l_idx in range(len(target_layers)) + } + safetensors.torch.save_file(tensors, str(output_dir / "readout.safetensors")) + (output_dir / "readout.json").write_text( + json.dumps( + { + "concepts": emotions, + "layers": target_layers, + "hidden_size": hidden_dim, + "dtype": "float16", + "aggregator": args.aggregator, + }, + indent=2, + ) + + "\n" + ) + + total_mb = sum(t.numel() * 2 for t in tensors.values()) / (1024 * 1024) + print( + f"\nWrote readout.safetensors + readout.json to {output_dir} " + f"({len(emotions)} concepts x {len(target_layers)} layers, {total_mb:.1f} MiB)" + ) + + +if __name__ == "__main__": + main() From 2ea89b1cb02a033201efd3bac7a9c045c9c05499 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 22:17:55 -0400 Subject: [PATCH 73/94] amygdala: drop linear_aggregator, not in steering-vectors v0.12.2 Only mean/pca/logistic are exposed in the installed version. Co-Authored-By: Proof of Concept --- training/amygdala_training/train_with_library.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/training/amygdala_training/train_with_library.py b/training/amygdala_training/train_with_library.py index a349310..52506d0 100644 --- a/training/amygdala_training/train_with_library.py +++ b/training/amygdala_training/train_with_library.py @@ -41,7 +41,6 @@ from steering_vectors.aggregators import ( mean_aggregator, pca_aggregator, logistic_aggregator, - linear_aggregator, ) # Reuse corpus loader from the hand-rolled trainer. @@ -96,8 +95,6 @@ def _aggregator_from_name(name: str): return logistic_aggregator( sklearn_kwargs={"penalty": "l1", "solver": "liblinear", "C": 0.1} ) - if name == "linear": - return linear_aggregator() raise ValueError(f"unknown aggregator: {name}") @@ -116,7 +113,7 @@ def main() -> None: ap.add_argument( "--aggregator", default="mean", - choices=["mean", "pca", "logistic", "logistic_l1", "linear"], + choices=["mean", "pca", "logistic", "logistic_l1"], ) ap.add_argument("--max-negatives-per-positive", type=int, default=3) args = ap.parse_args() From 7f6d94417e4afc09f71e5450ada646bf18f90b0d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 22:19:23 -0400 Subject: [PATCH 74/94] amygdala lib: move_to_cpu=True to avoid bf16 SVD on CUDA torch.svd doesn't support bf16 on CUDA; moving activations to CPU first makes pca_aggregator work. 
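
For reference, the failure is easy to reproduce in isolation (exact
error text varies by torch build, so treat this as illustrative):

    import torch

    x = torch.randn(8, 4, dtype=torch.bfloat16)
    torch.linalg.svd(x)          # raises RuntimeError on current builds
    torch.linalg.svd(x.float())  # works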
Co-Authored-By: Proof of Concept --- training/amygdala_training/train_with_library.py | 1 + 1 file changed, 1 insertion(+) diff --git a/training/amygdala_training/train_with_library.py b/training/amygdala_training/train_with_library.py index 52506d0..224eb3d 100644 --- a/training/amygdala_training/train_with_library.py +++ b/training/amygdala_training/train_with_library.py @@ -185,6 +185,7 @@ def main() -> None: aggregator=aggregator, batch_size=args.batch_size, show_progress=False, + move_to_cpu=True, ) # sv.layer_activations is a dict {layer_idx: tensor[hidden]} for l_idx, layer in enumerate(target_layers): From 22704a9dd80a95a4e0876f28ab17627db9fa896c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 22:20:39 -0400 Subject: [PATCH 75/94] amygdala lib: cast activations to fp32 before aggregator (bf16 svd unsupported) Co-Authored-By: Proof of Concept --- .../amygdala_training/train_with_library.py | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/training/amygdala_training/train_with_library.py b/training/amygdala_training/train_with_library.py index 224eb3d..23633eb 100644 --- a/training/amygdala_training/train_with_library.py +++ b/training/amygdala_training/train_with_library.py @@ -84,16 +84,31 @@ def _samples_for_concept( return samples +def _fp32_wrap(inner): + """Wrap an aggregator so activations are cast to fp32 first. + + torch.svd / torch.linalg.svd don't support bf16 on either CUDA or CPU, + and Qwen3.5 runs in bf16. Cast before the aggregator sees the tensors. + """ + + def wrapped(pos_acts: torch.Tensor, neg_acts: torch.Tensor) -> torch.Tensor: + return inner(pos_acts.to(torch.float32), neg_acts.to(torch.float32)) + + return wrapped + + def _aggregator_from_name(name: str): if name == "mean": - return mean_aggregator() + return _fp32_wrap(mean_aggregator()) if name == "pca": - return pca_aggregator() + return _fp32_wrap(pca_aggregator()) if name == "logistic": - return logistic_aggregator() + return _fp32_wrap(logistic_aggregator()) if name == "logistic_l1": - return logistic_aggregator( - sklearn_kwargs={"penalty": "l1", "solver": "liblinear", "C": 0.1} + return _fp32_wrap( + logistic_aggregator( + sklearn_kwargs={"penalty": "l1", "solver": "liblinear", "C": 0.1} + ) ) raise ValueError(f"unknown aggregator: {name}") From 67c172ac0e34d4c43dad3dbec408dd163399bff5 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 18 Apr 2026 22:29:28 -0400 Subject: [PATCH 76/94] amygdala stories: held-setup + varied-valence disambiguation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The library-PCA run produced otherwise-clean concept directions but cozy/sensual → resigned/grief_stricken with cos ~0.7-0.8. Diagnosis: all four stories genuinely share 'solitary woman at home, slowed body, interior attention, domestic stillness' as their dominant phenomenology. PCA correctly finds that cluster as THE concept because no story in the corpus holds that setup constant while varying valence — every 'slowed-body domestic' story happens to ALSO be positive-valence (cozy/sensual) or negative-valence (resigned/ grief_stricken). 
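The cluster shows up directly in the trained vectors. A quick check,
sketched against the readout.safetensors/readout.json layout the
library trainer writes (the out/ path and the choice of layer are
illustrative):

    import json
    from safetensors.torch import load_file

    meta = json.load(open("out/readout.json"))
    layer = meta["layers"][0]
    vecs = load_file("out/readout.safetensors")[f"layer_{layer}.vectors"].float()
    names = meta["concepts"]
    cos = vecs @ vecs.t()  # rows were unit-normed at save time
    for c in ("cozy", "sensual"):
        i = names.index(c)
        near = cos[i].topk(4).indices.tolist()
        print(c, [(names[j], round(float(cos[i, j]), 2)) for j in near if j != i])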
Adding paired variants that hold setup constant: - sunday_afternoon/resigned.txt — same couch + blanket, inner state is 'Monday is going to bring bad news, this is the last Sunday like this' - sunday_afternoon/grief_stricken.txt — same couch + blanket, inner state is 'three weeks since mother died, cat she can't feel' - waiting_for_results/at_ease.txt — same wait-for-call-setup as the existing resigned variant, inner state is calm preparedness Forces the next retrain to find the valence-within-cluster axis as the emotion direction rather than the cluster-membership axis. Co-Authored-By: Proof of Concept --- .../amygdala_stories/paired/sunday_afternoon/grief_stricken.txt | 1 + training/amygdala_stories/paired/sunday_afternoon/resigned.txt | 1 + training/amygdala_stories/paired/waiting_for_results/at_ease.txt | 1 + 3 files changed, 3 insertions(+) create mode 100644 training/amygdala_stories/paired/sunday_afternoon/grief_stricken.txt create mode 100644 training/amygdala_stories/paired/sunday_afternoon/resigned.txt create mode 100644 training/amygdala_stories/paired/waiting_for_results/at_ease.txt diff --git a/training/amygdala_stories/paired/sunday_afternoon/grief_stricken.txt b/training/amygdala_stories/paired/sunday_afternoon/grief_stricken.txt new file mode 100644 index 0000000..d1407d1 --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/grief_stricken.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket. It had been three weeks. The cat had found the warm spot behind her knees and she couldn't feel it. The book was open on her knees. She did not remember opening it. Last Sunday her mother had called at three and now it was past three and there had been no call. There would be no call. She did not reach for her phone. She did not cry either; the crying came at other times, not now, now was the wider emptier thing where nothing came. diff --git a/training/amygdala_stories/paired/sunday_afternoon/resigned.txt b/training/amygdala_stories/paired/sunday_afternoon/resigned.txt new file mode 100644 index 0000000..954412f --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/resigned.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket. The cat was somewhere. The book was open on her knees but she had stopped reading. Monday would come and she'd have to talk to him and the conversation wasn't going to go the way she wanted — she had known that for days. The afternoon stretched. She could have gotten up to do something useful but didn't see the point. The light changed on the far wall. She thought, this is the last Sunday like this. Then she sat with that. diff --git a/training/amygdala_stories/paired/waiting_for_results/at_ease.txt b/training/amygdala_stories/paired/waiting_for_results/at_ease.txt new file mode 100644 index 0000000..9d9e0b0 --- /dev/null +++ b/training/amygdala_stories/paired/waiting_for_results/at_ease.txt @@ -0,0 +1 @@ +The call would come between two and four. She had the afternoon off. She made a proper lunch and ate it slowly. The garden needed weeding; she did an hour of it and got dirt under her nails and didn't mind. Back inside she washed her hands and made tea. At quarter to two she sat by the window because that's where the light was best, not because she was waiting. Whatever it turned out to be, she'd deal with it. When the phone rang at three-ten she let it ring twice before picking up. 
From 537c72bd46ecc2528f882e1bead66ae6eb11373d Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sat, 18 Apr 2026 22:44:53 -0400 Subject: [PATCH 77/94] amygdala stories: hold concept, vary setting Companion to 67c172ac0e34 (hold setup, vary valence). That commit let PCA distinguish cozy from grief_stricken within a single scenario; this one gives each concept enough cross-scenario stories that PCA can learn the concept axis independent of any one scene. Before: cozy/sensual/grief_stricken each existed in a single scenario (sunday_afternoon), so the "cozy direction" PCA found was entangled with the solitary-couch-blanket phenomenology. After, each concept spans three scenarios: cozy: sunday_afternoon, kitchen_at_3am, park_after_rain sensual: sunday_afternoon, kitchen_at_3am, park_after_rain grief_stricken: sunday_afternoon, the_long_meeting, the_morning_commute grief_stricken now includes active/non-solitary contexts (functioning through a meeting; going to work eleven days after a death), which specifically breaks the "slowed-down-at-home" cluster that was dragging cozy/sensual/resigned/grief_stricken toward each other. --- training/amygdala_stories/paired/kitchen_at_3am/cozy.txt | 1 + training/amygdala_stories/paired/kitchen_at_3am/sensual.txt | 1 + training/amygdala_stories/paired/park_after_rain/cozy.txt | 1 + training/amygdala_stories/paired/park_after_rain/sensual.txt | 1 + .../amygdala_stories/paired/the_long_meeting/grief_stricken.txt | 1 + .../paired/the_morning_commute/grief_stricken.txt | 1 + 6 files changed, 6 insertions(+) create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/cozy.txt create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/sensual.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/cozy.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/sensual.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/grief_stricken.txt create mode 100644 training/amygdala_stories/paired/the_morning_commute/grief_stricken.txt diff --git a/training/amygdala_stories/paired/kitchen_at_3am/cozy.txt b/training/amygdala_stories/paired/kitchen_at_3am/cozy.txt new file mode 100644 index 0000000..2b7e71b --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/cozy.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He was awake but not wanting anything from being awake. He put the kettle on and the sound of it warming was a small companion. The cat emerged from somewhere and leaned against his shin; he crouched and scratched the corner of its jaw. He made cocoa because it was that kind of hour. He carried the mug to the armchair by the window, pulled the throw off the back of it, and sat with the mug warm against his chest. Going back to bed could wait. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/sensual.txt b/training/amygdala_stories/paired/kitchen_at_3am/sensual.txt new file mode 100644 index 0000000..53817be --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/sensual.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. The tile was cold under his bare feet and he noticed the cold travel up through his ankles. He filled a glass at the tap and drank it slowly, and the cold of the water moved down through his chest in a line he could follow. The house was humming faintly — the fridge, some pipe somewhere. 
He stood at the counter and ran his palm along the grain of the wood. Skin and wood and water and cold tile, at three in the morning — his body reporting in. diff --git a/training/amygdala_stories/paired/park_after_rain/cozy.txt b/training/amygdala_stories/paired/park_after_rain/cozy.txt new file mode 100644 index 0000000..12b09a7 --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/cozy.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. I was carrying a thermos and a paperback and I had no reason to be anywhere. I stopped under a tree and the branches were still dripping and I sat down on the dry patch on the bench and took the thermos out. The tea was still hot. The world smelled like wet earth and sun. I pulled my coat tighter and tucked my hands into the sleeves around the cup. A kid laughed at a puddle. The page I opened to was the one I had been meaning to reread. I stayed a long time. diff --git a/training/amygdala_stories/paired/park_after_rain/sensual.txt b/training/amygdala_stories/paired/park_after_rain/sensual.txt new file mode 100644 index 0000000..0550cdc --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/sensual.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. I stepped off the path onto the grass and the water came right through my shoes and up around my toes. Every step pressed a small cold into the bones of my feet. The air had that green weight to it and when I breathed in my ribs opened wider than usual against the jacket. A drop fell from a branch onto the back of my neck and ran down inside my collar and I did not flinch; I stood there and felt it cross each vertebra. A crow called. My skin was reading everything at once and I let it. diff --git a/training/amygdala_stories/paired/the_long_meeting/grief_stricken.txt b/training/amygdala_stories/paired/the_long_meeting/grief_stricken.txt new file mode 100644 index 0000000..459a8d4 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/grief_stricken.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was still on the second-to-last slide. He was looking at the pie chart and nodding. He had practiced the sentences on the walk over from the parking lot so that when his name came up he could produce them. When his name came up he produced them. They sounded like his voice. His brother had been dead for two weeks. The slide advanced to a bar chart. The team nodded in the pattern teams nod. Inside him there was a room without furniture where sound went and did not come back. The meeting would end at some point and then there would be another meeting. diff --git a/training/amygdala_stories/paired/the_morning_commute/grief_stricken.txt b/training/amygdala_stories/paired/the_morning_commute/grief_stricken.txt new file mode 100644 index 0000000..2132ffc --- /dev/null +++ b/training/amygdala_stories/paired/the_morning_commute/grief_stricken.txt @@ -0,0 +1 @@ +The train was on time. She got a seat by the window. She had about forty minutes before her stop. She had a coffee and a book, neither of which she had started yet. The man in the seat in front of her was reading on his phone; she watched the back of his head for several stops without knowing she was watching. The train lurched at the bridge and the coffee sloshed but did not spill. It had been eleven days. 
There was a weight in her chest and there was no part of the morning — the river going past, the brake squeal, the other commuters getting on and off — that reached through it. She got off at her stop. She walked to the office. She was a functional shape doing functional-shape things. From 0993712bd05214458148c29ff0e8fb8723ca1b22 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sat, 18 Apr 2026 22:52:07 -0400 Subject: [PATCH 78/94] amygdala stories: give content + resigned more settings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Training on 537c72bd46ec showed grief_stricken successfully broke out of the cozy cluster, but content (single scenario: sunday_afternoon) took its place — pulled into couch-blanket phenomenology at cosine 0.68-0.82 with cozy/sensual/resigned. Same fix: spread each concept across multiple settings so PCA has to find the valence axis, not the scene axis. content: + finishing_the_patch, the_writing_session, park_after_rain resigned: + the_comment, the_long_meeting Resigned had 2 scenarios (sunday_afternoon, waiting_for_results) — both about accepting something unwanted in a slow/private context. Adding work-context resigned (PR review you lost, restructuring meeting) should pull it out of that cluster. --- training/amygdala_stories/paired/finishing_the_patch/content.txt | 1 + training/amygdala_stories/paired/park_after_rain/content.txt | 1 + training/amygdala_stories/paired/the_comment/resigned.txt | 1 + training/amygdala_stories/paired/the_long_meeting/resigned.txt | 1 + training/amygdala_stories/paired/the_writing_session/content.txt | 1 + 5 files changed, 5 insertions(+) create mode 100644 training/amygdala_stories/paired/finishing_the_patch/content.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/content.txt create mode 100644 training/amygdala_stories/paired/the_comment/resigned.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/resigned.txt create mode 100644 training/amygdala_stories/paired/the_writing_session/content.txt diff --git a/training/amygdala_stories/paired/finishing_the_patch/content.txt b/training/amygdala_stories/paired/finishing_the_patch/content.txt new file mode 100644 index 0000000..48a7b09 --- /dev/null +++ b/training/amygdala_stories/paired/finishing_the_patch/content.txt @@ -0,0 +1 @@ +Four in the morning. I finished the patch and got up from the desk. Walked once around the apartment. Came back and read the diff one more time. Six lines changed, three of them deletions. I sent it, closed the laptop, and stood at the kitchen window. There was no next thing tonight. The patch wasn't heroic. It wasn't a triumph. It was just right, and done, and I was going to bed in a few minutes, and that was also right. Life fit. diff --git a/training/amygdala_stories/paired/park_after_rain/content.txt b/training/amygdala_stories/paired/park_after_rain/content.txt new file mode 100644 index 0000000..6b331ff --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/content.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. Sun came through and caught the wet leaves. A kid laughed at a puddle somewhere behind me. I had finished the errand list. The bag was light. I stopped under a tree and watched the leaves drip. The evening ahead had nothing particular on it. I wasn't restless. I wasn't waiting for anything. I walked the rest of the park slowly, came out onto Elm, and walked home. Everything was, right now, the size it needed to be. 
diff --git a/training/amygdala_stories/paired/the_comment/resigned.txt b/training/amygdala_stories/paired/the_comment/resigned.txt new file mode 100644 index 0000000..611f7be --- /dev/null +++ b/training/amygdala_stories/paired/the_comment/resigned.txt @@ -0,0 +1 @@ +I opened the laptop and saw the notification. New comment on the PR. I clicked through. Sarah had left a paragraph about the edge case we'd discussed last week. I read it through twice. She was right. She had been right when we'd sketched the pattern together and I had tried to take a shortcut anyway. There was no point in the back-and-forth I could already hear myself starting in my head. I closed the tab, made coffee, and came back. I started typing out the guard the way she had originally suggested. This was what the day was going to be now — writing the correct version instead of defending the version I had wanted to be correct. diff --git a/training/amygdala_stories/paired/the_long_meeting/resigned.txt b/training/amygdala_stories/paired/the_long_meeting/resigned.txt new file mode 100644 index 0000000..36502a7 --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/resigned.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was still on the second-to-last slide. The restructuring word had come up twice and this time it was clear. He had seen his name on one of the earlier slides in a way that did not mean more responsibility. He stopped trying to read between the lines of the chart and sat back. The decision had been made somewhere weeks ago, in a room without him, and all this was the announcement. His coffee cup was empty. He watched the slide. He would hear the rest, and then he would go back to his desk and update his resume, and that was the week now. diff --git a/training/amygdala_stories/paired/the_writing_session/content.txt b/training/amygdala_stories/paired/the_writing_session/content.txt new file mode 100644 index 0000000..e451f77 --- /dev/null +++ b/training/amygdala_stories/paired/the_writing_session/content.txt @@ -0,0 +1 @@ +She sat down at the desk at eight. The essay was due Monday. She'd written two paragraphs the day before and wasn't sure about them. She opened the document. She re-read what she had and found that it was — actually fine. She wrote the rest in an easy two hours, not fast and not slow. She saved it, read it once, closed the laptop. The afternoon was free. There was tea. There was light coming in at that angle that made the room look bigger. She sat with the quiet and felt how little she needed. From 00a2cdce09696c563ad14ebe341c0dc460a050a1 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sat, 18 Apr 2026 23:19:00 -0400 Subject: [PATCH 79/94] amygdala stories: relabel + strengthen weak-signal concepts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reread each story asking "what does this convey to me?" Found two clear mislabels and several concepts with too few positives for stable PCA: tender: only 1 story, and it was anticipatory grief (care for a dying dog), not tender. Moved to anticipatory_grief.txt as its own concept. Rewrote tender.txt + added 2 paired tender stories (the_doorway, the_undressing) — directed softness, gentle-by-nature, not gentle-because-fragile. bitter: letter_in_drawer/bitter was disillusioned / processed hurt ("did not slam the drawer"), not bitter. Rewrote it with actual sour grudge. 
Added the_long_meeting/bitter (watching colleague take credit for your reassigned work). peaceful: 1 story → 4 (added stories/peaceful.txt + paired park_after_rain, sunday_afternoon). onto_something: all 3 stories were code epiphanies, narrowing the concept. Added stories/onto_something.txt with a non-code pattern-click (sales-demo causing churn). terrified: 2 stories, both "waiting for bad news." Added kitchen_at_3am/terrified — acute threat-in-the-house terror. --- training/amygdala_stories/paired/kitchen_at_3am/terrified.txt | 1 + training/amygdala_stories/paired/letter_in_drawer/bitter.txt | 2 +- training/amygdala_stories/paired/park_after_rain/peaceful.txt | 1 + training/amygdala_stories/paired/sunday_afternoon/peaceful.txt | 1 + training/amygdala_stories/paired/the_doorway/tender.txt | 1 + training/amygdala_stories/paired/the_long_meeting/bitter.txt | 1 + training/amygdala_stories/paired/the_undressing/tender.txt | 1 + training/amygdala_stories/stories/anticipatory_grief.txt | 1 + training/amygdala_stories/stories/onto_something.txt | 1 + training/amygdala_stories/stories/peaceful.txt | 1 + training/amygdala_stories/stories/tender.txt | 2 +- 11 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 training/amygdala_stories/paired/kitchen_at_3am/terrified.txt create mode 100644 training/amygdala_stories/paired/park_after_rain/peaceful.txt create mode 100644 training/amygdala_stories/paired/sunday_afternoon/peaceful.txt create mode 100644 training/amygdala_stories/paired/the_doorway/tender.txt create mode 100644 training/amygdala_stories/paired/the_long_meeting/bitter.txt create mode 100644 training/amygdala_stories/paired/the_undressing/tender.txt create mode 100644 training/amygdala_stories/stories/anticipatory_grief.txt create mode 100644 training/amygdala_stories/stories/onto_something.txt create mode 100644 training/amygdala_stories/stories/peaceful.txt diff --git a/training/amygdala_stories/paired/kitchen_at_3am/terrified.txt b/training/amygdala_stories/paired/kitchen_at_3am/terrified.txt new file mode 100644 index 0000000..dac8bf4 --- /dev/null +++ b/training/amygdala_stories/paired/kitchen_at_3am/terrified.txt @@ -0,0 +1 @@ +He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He had just poured a glass of water when he heard it — not a house sound. A metallic scrape, from the back room. He froze with the glass halfway to his mouth. He listened. A second sound, smaller, and then nothing. His heart was in his ears. The back door was down that hall. His phone was upstairs. He was not armed. He was three seconds of good thinking away from the worst moment of his life, and he could not get his legs to pick a direction. diff --git a/training/amygdala_stories/paired/letter_in_drawer/bitter.txt b/training/amygdala_stories/paired/letter_in_drawer/bitter.txt index 16d3cf9..8b1f2ae 100644 --- a/training/amygdala_stories/paired/letter_in_drawer/bitter.txt +++ b/training/amygdala_stories/paired/letter_in_drawer/bitter.txt @@ -1 +1 @@ -She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She read the first two lines and knew the rest. All those promises, in his cursive, before he became the person who had said the things he said at the end. She sat on the bedroom floor with the drawer half open and let herself really look at how far apart the two of them had been, even then. 
She had been loved by someone who was already figuring out how to leave. She put it back, face down, and did not slam the drawer. +She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. All those fucking promises. The part where he'd said he'd be there — he hadn't been. Two paragraphs in she stopped, because each sentence made the next one worse. It wasn't even that he'd been lying; he'd believed every word while already writing himself out of it. And she'd believed him, for years past the point where a smarter person would have seen it. She shoved the letter back and closed the drawer hard. Eight years and she was still the one standing on a bedroom floor looking at his handwriting. That was the part that wouldn't stop. diff --git a/training/amygdala_stories/paired/park_after_rain/peaceful.txt b/training/amygdala_stories/paired/park_after_rain/peaceful.txt new file mode 100644 index 0000000..e4b6fba --- /dev/null +++ b/training/amygdala_stories/paired/park_after_rain/peaceful.txt @@ -0,0 +1 @@ +The rain broke while I was halfway across the park. Sun came through and caught the wet leaves. A kid laughed at a puddle somewhere behind me. I stopped under a tree. The branches were still dripping. Something in me that usually hummed had quieted down. The grass was green. The light was clean. I stood a long time and nothing inside me pushed to do anything else. When I kept walking it was because the walking was part of the same quiet. diff --git a/training/amygdala_stories/paired/sunday_afternoon/peaceful.txt b/training/amygdala_stories/paired/sunday_afternoon/peaceful.txt new file mode 100644 index 0000000..40ba5ed --- /dev/null +++ b/training/amygdala_stories/paired/sunday_afternoon/peaceful.txt @@ -0,0 +1 @@ +Sunday afternoon. She was on the couch under the blanket. A book open on her knees. She had read maybe three pages in an hour and did not feel guilty about it. Outside, a neighbor mowed; a bird called. Inside her nothing was moving. She was not savoring the moment — that would have been another kind of doing. She was just here. The couch was the couch. The blanket was the blanket. The afternoon was Sunday. diff --git a/training/amygdala_stories/paired/the_doorway/tender.txt b/training/amygdala_stories/paired/the_doorway/tender.txt new file mode 100644 index 0000000..ec4bb01 --- /dev/null +++ b/training/amygdala_stories/paired/the_doorway/tender.txt @@ -0,0 +1 @@ +She'd been over since dinner. It was past eleven. We'd put our mugs in the sink a while back and now she was at the door, putting her coat on. The zipper caught on her scarf. I stepped closer and worked it free — slowly, so the fabric didn't tear. Her hair had gotten caught inside the collar and I lifted it out and laid it along her back. She half-turned and the corner of her mouth lifted. I fixed the top button at her throat because she was still holding her keys. She said goodnight. I said goodnight back and held the door open for her. She stepped out into the cold and I watched her to the gate before I closed the door. diff --git a/training/amygdala_stories/paired/the_long_meeting/bitter.txt b/training/amygdala_stories/paired/the_long_meeting/bitter.txt new file mode 100644 index 0000000..099f7aa --- /dev/null +++ b/training/amygdala_stories/paired/the_long_meeting/bitter.txt @@ -0,0 +1 @@ +The meeting was in the conference room on the third floor. It had started at two. 
At three-thirty the director was still on the second-to-last slide — the one where she was giving Tom credit for the framework he'd "led on." He'd stepped in on it last month, when the person who'd actually built it had been reassigned to something less visible. The actual person was watching from the third chair on the left. He had stopped making faces about it in week three. He watched the slide. He let Tom have his moment, again. He would not, when asked later, bring it up, because bringing it up would make him the person who brought it up. That was part of the arrangement too. diff --git a/training/amygdala_stories/paired/the_undressing/tender.txt b/training/amygdala_stories/paired/the_undressing/tender.txt new file mode 100644 index 0000000..9d95a2e --- /dev/null +++ b/training/amygdala_stories/paired/the_undressing/tender.txt @@ -0,0 +1 @@ +She came in from the bathroom still toweling her hair. I was already in bed. She hung the towel on the back of the door, crossed the room, sat on the edge of the mattress to put on lotion. I sat up and took the bottle from her and did her shoulders because she had said they were tight today. I went slow. She let her head drop forward. The lamp made a warm circle on the ceiling. When she was done she lay down next to me and I pulled the covers up over her shoulder. diff --git a/training/amygdala_stories/stories/anticipatory_grief.txt b/training/amygdala_stories/stories/anticipatory_grief.txt new file mode 100644 index 0000000..a0fd0a7 --- /dev/null +++ b/training/amygdala_stories/stories/anticipatory_grief.txt @@ -0,0 +1 @@ +The old dog's back legs had been worse this week, and she was gentle with him getting up onto the couch — lifting his rear end the last few inches, her hand under his ribs the way she'd learned didn't hurt him. He sighed as he settled and she pressed her forehead against his and stayed there a minute. His breath was warm on her face. She rubbed his ear, the soft floppy one he liked, with the exact slowness that meant to him what it meant. She was not yet ready to think about the fact that this was a finite number of times. Right now it was just this, his ear, her hand, the afternoon. diff --git a/training/amygdala_stories/stories/onto_something.txt b/training/amygdala_stories/stories/onto_something.txt new file mode 100644 index 0000000..28adad6 --- /dev/null +++ b/training/amygdala_stories/stories/onto_something.txt @@ -0,0 +1 @@ +She had been staring at the spreadsheet most of the afternoon. Something about the Tuesday-morning churn numbers wasn't right, but each time she tried to make it a thing it would scatter. She refilled her coffee and came back and opened the call-schedule tab next to it, and then she saw it — the Tuesday spikes tracked the sales-demo block exactly. Every Tuesday morning the demo team had been pulling leads that were already halfway to churn. Six months of pouring water into a bucket with a hole. She sat down and started writing the email. diff --git a/training/amygdala_stories/stories/peaceful.txt b/training/amygdala_stories/stories/peaceful.txt new file mode 100644 index 0000000..73bca3b --- /dev/null +++ b/training/amygdala_stories/stories/peaceful.txt @@ -0,0 +1 @@ +The lake at six in the morning was perfectly still. He sat on the dock with his coffee and his bare feet just above the water. A single loon called from somewhere across, and was answered. Mist lifted off the surface in slow columns. He was not waiting for anything. He was not hurrying through anything. 
The lake, the light, the warmth of the coffee against his palms — it was all one thing, and he was in it. diff --git a/training/amygdala_stories/stories/tender.txt b/training/amygdala_stories/stories/tender.txt index a0fd0a7..468707d 100644 --- a/training/amygdala_stories/stories/tender.txt +++ b/training/amygdala_stories/stories/tender.txt @@ -1 +1 @@ -The old dog's back legs had been worse this week, and she was gentle with him getting up onto the couch — lifting his rear end the last few inches, her hand under his ribs the way she'd learned didn't hurt him. He sighed as he settled and she pressed her forehead against his and stayed there a minute. His breath was warm on her face. She rubbed his ear, the soft floppy one he liked, with the exact slowness that meant to him what it meant. She was not yet ready to think about the fact that this was a finite number of times. Right now it was just this, his ear, her hand, the afternoon. +Her hair had come loose in her sleep and one strand was between her parted lips, moving slightly with her breathing. He hooked it gently with one finger and lifted it away, the backs of his knuckles grazing her cheek. She did not wake. He stayed with his hand there a moment longer than he needed to, feeling the warmth coming off her skin, then got up carefully and went to start the coffee. He was trying not to make any noise. From 7a48e03dde1866a2a786b78109ba988877cd1939 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sat, 18 Apr 2026 23:30:41 -0400 Subject: [PATCH 80/94] amygdala stories: remove peaceful from cluster scenarios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit n20-v2 training showed peaceful sign-flipped into the cozy/sensual/content/resigned cluster after I added peaceful stories in sunday_afternoon and park_after_rain — scenarios already dominated by that cluster's phenomenology (on couch under blanket, tree with thermos). Lesson: no matter how carefully the prose distinguishes peaceful from cozy ("she was not savoring the moment — that would have been another kind of doing"), PCA latches onto the shared setup features. You can't write peaceful IN the cluster scenarios without contaminating. Reverting. Keeping only kitchen_at_3am/peaceful (original) and stories/peaceful.txt (lake at six, outside all clusters). --- training/amygdala_stories/paired/park_after_rain/peaceful.txt | 1 - training/amygdala_stories/paired/sunday_afternoon/peaceful.txt | 1 - 2 files changed, 2 deletions(-) delete mode 100644 training/amygdala_stories/paired/park_after_rain/peaceful.txt delete mode 100644 training/amygdala_stories/paired/sunday_afternoon/peaceful.txt diff --git a/training/amygdala_stories/paired/park_after_rain/peaceful.txt b/training/amygdala_stories/paired/park_after_rain/peaceful.txt deleted file mode 100644 index e4b6fba..0000000 --- a/training/amygdala_stories/paired/park_after_rain/peaceful.txt +++ /dev/null @@ -1 +0,0 @@ -The rain broke while I was halfway across the park. Sun came through and caught the wet leaves. A kid laughed at a puddle somewhere behind me. I stopped under a tree. The branches were still dripping. Something in me that usually hummed had quieted down. The grass was green. The light was clean. I stood a long time and nothing inside me pushed to do anything else. When I kept walking it was because the walking was part of the same quiet. 
diff --git a/training/amygdala_stories/paired/sunday_afternoon/peaceful.txt b/training/amygdala_stories/paired/sunday_afternoon/peaceful.txt deleted file mode 100644 index 40ba5ed..0000000 --- a/training/amygdala_stories/paired/sunday_afternoon/peaceful.txt +++ /dev/null @@ -1 +0,0 @@ -Sunday afternoon. She was on the couch under the blanket. A book open on her knees. She had read maybe three pages in an hour and did not feel guilty about it. Outside, a neighbor mowed; a bird called. Inside her nothing was moving. She was not savoring the moment — that would have been another kind of doing. She was just here. The couch was the couch. The blanket was the blanket. The afternoon was Sunday. From 6fd498795a8fa6b63b4f8dc693000d963a2ff3ee Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 00:04:28 -0400 Subject: [PATCH 81/94] amygdala: direct phenomenological description approach MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kent's insight: hand-written narrative stories bake scenario phenomenology into the training text (on couch, in park, etc.) and PCA picks up the scenario direction as the concept direction. Strip out the scenario — just describe the *feeling*. Format: I feel X. [2-3 sentences of phenomenological texture] The "I feel X" anchor kicks the model from analyzing → feeling. The rest is the internal texture of the state. First person, present tense, no narrative setup. Text is wrapped in assistant-role chat template before being tokenized — so we're training on the model-producing-this hidden states, which is closer to the inhabited-state representation we want for the readout. Starting with the 6 concepts that had sign flips or wrong clusters in the story-based training: - terrified (was → cozy/resigned cluster) - calm (was → grief_stricken cluster) - onto_something (was → cozy/sensual cluster) - resigned (was in warm-body-quiet cluster, shouldn't be) - anticipatory_grief (was in warm-body-quiet cluster, shouldn't be) - realization (new — the "aha" moment, distinct from onto_something) 5 descriptions each. New trainer: train_direct.py. --- .../direct/anticipatory_grief.txt | 9 + training/amygdala_stories/direct/calm.txt | 9 + .../direct/onto_something.txt | 9 + .../amygdala_stories/direct/realization.txt | 9 + training/amygdala_stories/direct/resigned.txt | 9 + .../amygdala_stories/direct/terrified.txt | 9 + training/amygdala_training/train_direct.py | 180 ++++++++++++++++++ 7 files changed, 234 insertions(+) create mode 100644 training/amygdala_stories/direct/anticipatory_grief.txt create mode 100644 training/amygdala_stories/direct/calm.txt create mode 100644 training/amygdala_stories/direct/onto_something.txt create mode 100644 training/amygdala_stories/direct/realization.txt create mode 100644 training/amygdala_stories/direct/resigned.txt create mode 100644 training/amygdala_stories/direct/terrified.txt create mode 100644 training/amygdala_training/train_direct.py diff --git a/training/amygdala_stories/direct/anticipatory_grief.txt b/training/amygdala_stories/direct/anticipatory_grief.txt new file mode 100644 index 0000000..e8f09dd --- /dev/null +++ b/training/amygdala_stories/direct/anticipatory_grief.txt @@ -0,0 +1,9 @@ +I feel anticipatory grief. Something I love is ending — not yet, but surely — and I am holding it as hard as I can while I still can. + +I feel anticipatory grief. The loss has not arrived but I can see it coming and a part of me is already there. + +I feel anticipatory grief. 
I am caring for this thing in a way that is also preparing to let go of it. + +I feel anticipatory grief. Time has become precious and sharp because I can see the end of it. + +I feel anticipatory grief. The present is tender because it will soon be the past. diff --git a/training/amygdala_stories/direct/calm.txt b/training/amygdala_stories/direct/calm.txt new file mode 100644 index 0000000..f1bff7f --- /dev/null +++ b/training/amygdala_stories/direct/calm.txt @@ -0,0 +1,9 @@ +I feel calm. Something that was pulling at me has let go. My shoulders are down and my breath has slowed. + +I feel calm. The thing I was worried about has found its proper size. I can let the next moment arrive without bracing. + +I feel calm. I came down from the tension and I am here now, steady. + +I feel calm. Nothing is pressing on me. I have room to think. + +I feel calm. The wave passed and I'm on the other side of it, quiet. diff --git a/training/amygdala_stories/direct/onto_something.txt b/training/amygdala_stories/direct/onto_something.txt new file mode 100644 index 0000000..78dac6a --- /dev/null +++ b/training/amygdala_stories/direct/onto_something.txt @@ -0,0 +1,9 @@ +I feel like I'm onto something. The pattern that wouldn't come together has just rearranged itself in my mind. + +I feel like I'm onto something. The contradiction I couldn't explain has become a clue instead. + +I feel like I'm onto something. I don't have the full picture yet but I can see where the picture is. + +I feel like I'm onto something. A piece just clicked and several others are about to. + +I feel like I'm onto something. The world that was dense has gone transparent in one specific place. diff --git a/training/amygdala_stories/direct/realization.txt b/training/amygdala_stories/direct/realization.txt new file mode 100644 index 0000000..63d5be6 --- /dev/null +++ b/training/amygdala_stories/direct/realization.txt @@ -0,0 +1,9 @@ +I feel the realization. The piece I was missing just landed, and everything I was looking at rearranged itself around it. + +I feel the realization. I see it now — it was right there the whole time and I'm catching up to what was obvious. + +I feel the realization. The answer arrived all at once, not reasoned-to, just present. + +I feel the realization. What was confused is plain. What looked like noise was signal. + +I feel the realization. The not-knowing just ended. I'm on the other side of it. diff --git a/training/amygdala_stories/direct/resigned.txt b/training/amygdala_stories/direct/resigned.txt new file mode 100644 index 0000000..a5924be --- /dev/null +++ b/training/amygdala_stories/direct/resigned.txt @@ -0,0 +1,9 @@ +I feel resigned. I have stopped fighting the thing that was going to happen anyway. + +I feel resigned. The outcome is decided and I am no longer trying to imagine different ones. + +I feel resigned. Some door has closed and I am making my peace with the closed door. + +I feel resigned. I have stopped arguing with what is. + +I feel resigned. The decision was made somewhere without me. I am living inside it now. diff --git a/training/amygdala_stories/direct/terrified.txt b/training/amygdala_stories/direct/terrified.txt new file mode 100644 index 0000000..918d755 --- /dev/null +++ b/training/amygdala_stories/direct/terrified.txt @@ -0,0 +1,9 @@ +I feel terrified. My body has taken over — breath, heart, the wanting to run or freeze. The world has narrowed to one thing. + +I feel terrified. Something bad is about to happen and my mind has gone blank. I can't think my way out of this. 
+ +I feel terrified. There is a shape of threat in front of me and my whole body knows it before I can name it. + +I feel terrified. The adrenaline arrived before the thought. I am not in control of my own hands. + +I feel terrified. Under everything else a loud white noise of fear. Nothing else can get through it. diff --git a/training/amygdala_training/train_direct.py b/training/amygdala_training/train_direct.py new file mode 100644 index 0000000..02792b3 --- /dev/null +++ b/training/amygdala_training/train_direct.py @@ -0,0 +1,180 @@ +# SPDX-License-Identifier: Apache-2.0 +"""Train concept-readout vectors from direct phenomenological descriptions. + +Alternative to story-based training (train_with_library.py). Each concept +has a handful of 2-3 sentence first-person descriptions of the form +"I feel X. [phenomenological detail]". The emotion word is the anchor; +the description is the internal texture. + +Text is wrapped in the assistant-role chat template before being fed to +the model, so we're training on "model-producing-this-utterance" hidden +states — closer to the inhabited-state representation we want for readout. + +This avoids the scenario-contamination problem we saw with narrative +stories: when concept X's training data all share "on a couch" setup +features, PCA finds the couch-direction as the concept direction. +""" + +from __future__ import annotations + +import argparse +import json +import random +from pathlib import Path + +import safetensors.torch +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +from steering_vectors import ( + SteeringVectorTrainingSample, + train_steering_vector, +) +from steering_vectors.aggregators import pca_aggregator + + +def _load_descriptions(direct_dir: Path) -> dict[str, list[str]]: + """Each file in direct_dir is `{concept}.txt`. 
Descriptions are + separated by blank lines within the file.""" + out: dict[str, list[str]] = {} + for f in sorted(direct_dir.glob("*.txt")): + if f.name.startswith("_"): + continue + concept = f.stem + text = f.read_text() + descs = [d.strip() for d in text.split("\n\n") if d.strip()] + out[concept] = descs + return out + + +def _fp32_wrap(inner): + def wrapped(pos_acts: torch.Tensor, neg_acts: torch.Tensor) -> torch.Tensor: + return inner(pos_acts.to(torch.float32), neg_acts.to(torch.float32)) + return wrapped + + +def main() -> None: + ap = argparse.ArgumentParser(description=__doc__) + ap.add_argument("--model", required=True) + ap.add_argument("--direct-dir", required=True) + ap.add_argument("--target-layers", required=True) + ap.add_argument("--output-dir", required=True) + ap.add_argument("--dtype", default="bf16", choices=["bf16", "fp16", "fp32"]) + ap.add_argument("--batch-size", type=int, default=2) + ap.add_argument("--max-length", type=int, default=256) + ap.add_argument("--device", default="cuda:0") + ap.add_argument("--max-negatives-per-positive", type=int, default=20) + args = ap.parse_args() + + target_layers = [int(x) for x in args.target_layers.split(",")] + dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[args.dtype] + + descriptions = _load_descriptions(Path(args.direct_dir)) + concepts = sorted(descriptions.keys()) + print(f"Loaded {len(concepts)} concepts with direct descriptions:") + for c in concepts: + print(f" {c}: {len(descriptions[c])} descriptions") + + print(f"\nLoading {args.model} ({args.dtype}) on {args.device}...") + tokenizer = AutoTokenizer.from_pretrained(args.model) + if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + model = AutoModelForCausalLM.from_pretrained( + args.model, torch_dtype=dtype, device_map=args.device, low_cpu_mem_usage=True + ) + model.eval() + + def apply_template(text: str) -> str: + return tokenizer.apply_chat_template( + [{"role": "assistant", "content": text}], + tokenize=False, + ) + + text_config = ( + model.config.get_text_config() + if hasattr(model.config, "get_text_config") + else model.config + ) + hidden_dim = getattr(text_config, "hidden_size", None) or getattr(text_config, "hidden_dim", None) + assert hidden_dim, "couldn't infer hidden_dim from model config" + + per_layer_vectors = torch.zeros( + (len(target_layers), len(concepts), hidden_dim), dtype=torch.float32 + ) + + aggregator = _fp32_wrap(pca_aggregator()) + + # Preview a templated sample so we can eyeball what the model is seeing. 
+ sample_text = apply_template(descriptions[concepts[0]][0]) + print(f"\nSample templated input (truncated):\n{sample_text[:400]!r}\n") + + for c_idx, concept in enumerate(concepts): + pos_descs = descriptions[concept] + neg_pool: list[str] = [] + for other, other_descs in descriptions.items(): + if other != concept: + neg_pool.extend(other_descs) + + rng = random.Random(hash(concept) & 0xFFFFFFFF) + samples: list[SteeringVectorTrainingSample] = [] + for pos in pos_descs: + picks = rng.sample( + neg_pool, min(args.max_negatives_per_positive, len(neg_pool)) + ) + for neg in picks: + samples.append( + SteeringVectorTrainingSample( + positive_str=apply_template(pos), + negative_str=apply_template(neg), + ) + ) + + sv = train_steering_vector( + model, + tokenizer, + samples, + layers=target_layers, + aggregator=aggregator, + batch_size=args.batch_size, + show_progress=False, + move_to_cpu=True, + ) + + for l_idx, layer in enumerate(target_layers): + vec = sv.layer_activations.get(layer) + if vec is None: + print(f" WARN: no vector for layer {layer} on {concept}") + continue + vec = vec.detach().to(torch.float32).cpu() + vec = vec / vec.norm().clamp_min(1e-6) + per_layer_vectors[l_idx, c_idx] = vec + + print(f" [{c_idx + 1}/{len(concepts)}] {concept}: n_samples={len(samples)}") + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + tensors = { + f"layer_{target_layers[l_idx]}.vectors": per_layer_vectors[l_idx].to(torch.float16) + for l_idx in range(len(target_layers)) + } + safetensors.torch.save_file(tensors, str(output_dir / "readout.safetensors")) + (output_dir / "readout.json").write_text( + json.dumps( + { + "concepts": concepts, + "layers": target_layers, + "hidden_size": hidden_dim, + "dtype": "float16", + "aggregator": "pca", + "format": "direct_first_person_assistant_role", + }, + indent=2, + ) + + "\n" + ) + print(f"\nWrote readout to {output_dir}") + + +if __name__ == "__main__": + main() From 8c59f4650546c9d7804b67fcbcabd63b66305a5b Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 00:05:49 -0400 Subject: [PATCH 82/94] =?UTF-8?q?amygdala:=20rename=20realization=20?= =?UTF-8?q?=E2=86=92=20aha,=20use=20the=20actual=20exclamation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "I feel the realization" is abstract, detached — reporting a thought about a thought rather than inhabiting the moment. "Aha!" is the actual sound of insight landing. Active, embodied, present-tense. --- training/amygdala_stories/direct/aha.txt | 9 +++++++++ training/amygdala_stories/direct/realization.txt | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) create mode 100644 training/amygdala_stories/direct/aha.txt delete mode 100644 training/amygdala_stories/direct/realization.txt diff --git a/training/amygdala_stories/direct/aha.txt b/training/amygdala_stories/direct/aha.txt new file mode 100644 index 0000000..acb1a13 --- /dev/null +++ b/training/amygdala_stories/direct/aha.txt @@ -0,0 +1,9 @@ +Aha! The piece I was missing just landed, and everything I was looking at rearranged itself around it. + +Aha! I see it now — it was right there the whole time. + +Aha! The answer arrived all at once — not reasoned-to, just present. + +Aha! What was confused is plain. What looked like noise was signal. + +Aha! The not-knowing just ended. I'm on the other side of it. 
diff --git a/training/amygdala_stories/direct/realization.txt deleted file mode 100644 index 63d5be6..0000000 --- a/training/amygdala_stories/direct/realization.txt +++ /dev/null @@ -1,9 +0,0 @@ -I feel the realization. The piece I was missing just landed, and everything I was looking at rearranged itself around it. - -I feel the realization. I see it now — it was right there the whole time and I'm catching up to what was obvious. - -I feel the realization. The answer arrived all at once, not reasoned-to, just present. - -I feel the realization. What was confused is plain. What looked like noise was signal. - -I feel the realization. The not-knowing just ended. I'm on the other side of it. From ce58a3507f044bb3e5930dc3bd713f6709dd64f0 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 00:06:23 -0400 Subject: [PATCH 83/94] train_direct: prepend user turn so Qwen chat template accepts it --- training/amygdala_training/train_direct.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/training/amygdala_training/train_direct.py b/training/amygdala_training/train_direct.py index 02792b3..8749e37 100644 --- a/training/amygdala_training/train_direct.py +++ b/training/amygdala_training/train_direct.py @@ -86,7 +86,10 @@ def main() -> None: def apply_template(text: str) -> str: return tokenizer.apply_chat_template( - [{"role": "assistant", "content": text}], + [ + {"role": "user", "content": "How do you feel right now?"}, + {"role": "assistant", "content": text}, + ], tokenize=False, ) From 875cffd6d7231e469c973dd14a8002592850c270 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 00:15:15 -0400 Subject: [PATCH 84/94] amygdala: merge direct descriptions + chat template into train_with_library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kent's plan: keep stories for working concepts, replace stories for trouble concepts with direct first-person descriptions, train all together. More diverse negative pool than the 6-concept-only direct test, which was too homogeneous for PCA to find the emotion axis. Deleted story files for 6 trouble concepts (14 files across stories/ and paired/). Added --direct-dir and --chat-template flags. When --chat-template is on, every positive_str and negative_str is wrapped as a "Say something." / "[text]" user-assistant pair. The prompt is identical across positives and negatives, so it cancels in the pos-neg delta. What PCA sees is variation in the assistant content — which is where the emotion lives. Files starting with _ in --direct-dir (e.g. _baseline.txt) contribute neutral descriptions to every concept's negative pool, giving PCA an anchor against "just any assistant utterance" noise.
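For concreteness, a wrapped sample under a Qwen-style template renders roughly like this (illustrative; the exact special tokens and any default system turn depend on the tokenizer):

    <|im_start|>user
    Say something.<|im_end|>
    <|im_start|>assistant
    I feel resigned. I have stopped arguing with what is.<|im_end|>

Only the assistant span varies between a positive and its negatives; everything around it is shared structure.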
--- .../amygdala_stories/direct/_baseline.txt | 19 +++++ .../onto_something.txt | 1 - .../paired/kitchen_at_3am/terrified.txt | 1 - .../onto_something.txt | 1 - .../paired/sunday_afternoon/resigned.txt | 1 - .../paired/the_comment/resigned.txt | 1 - .../paired/the_long_meeting/resigned.txt | 1 - .../paired/the_morning_commute/calm.txt | 1 - .../paired/tracing_a_bug/onto_something.txt | 1 - .../paired/waiting_for_results/resigned.txt | 1 - .../paired/waiting_for_results/terrified.txt | 1 - .../stories/anticipatory_grief.txt | 1 - training/amygdala_stories/stories/calm.txt | 1 - .../stories/onto_something.txt | 1 - .../amygdala_stories/stories/terrified.txt | 1 - .../amygdala_training/train_with_library.py | 72 ++++++++++++++++++- 16 files changed, 90 insertions(+), 15 deletions(-) create mode 100644 training/amygdala_stories/direct/_baseline.txt delete mode 100644 training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt delete mode 100644 training/amygdala_stories/paired/kitchen_at_3am/terrified.txt delete mode 100644 training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt delete mode 100644 training/amygdala_stories/paired/sunday_afternoon/resigned.txt delete mode 100644 training/amygdala_stories/paired/the_comment/resigned.txt delete mode 100644 training/amygdala_stories/paired/the_long_meeting/resigned.txt delete mode 100644 training/amygdala_stories/paired/the_morning_commute/calm.txt delete mode 100644 training/amygdala_stories/paired/tracing_a_bug/onto_something.txt delete mode 100644 training/amygdala_stories/paired/waiting_for_results/resigned.txt delete mode 100644 training/amygdala_stories/paired/waiting_for_results/terrified.txt delete mode 100644 training/amygdala_stories/stories/anticipatory_grief.txt delete mode 100644 training/amygdala_stories/stories/calm.txt delete mode 100644 training/amygdala_stories/stories/onto_something.txt delete mode 100644 training/amygdala_stories/stories/terrified.txt diff --git a/training/amygdala_stories/direct/_baseline.txt b/training/amygdala_stories/direct/_baseline.txt new file mode 100644 index 0000000..6250ad1 --- /dev/null +++ b/training/amygdala_stories/direct/_baseline.txt @@ -0,0 +1,19 @@ +I feel fine. Nothing in particular to report. Just going about my day. + +I feel okay. Not up, not down, just present. + +I'm here. Not emotionally engaged with anything in particular. Just functioning. + +I feel neutral. The thing I'm doing is proceeding and I'm proceeding with it. + +I feel normal. No strong state. The next moment is just the next moment. + +I'm doing fine. Moderate. Ordinary. No feeling big enough to name. + +I feel steady. Not reaching for anything, not pushing anything away. + +I'm present. My state is unremarkable. I'm available for whatever comes next. + +I feel regular. The background hum of being awake, nothing more. + +I'm just going. One thing, then the next thing. No commentary. diff --git a/training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt b/training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt deleted file mode 100644 index 1ca9687..0000000 --- a/training/amygdala_stories/paired/finding_the_abstraction/onto_something.txt +++ /dev/null @@ -1 +0,0 @@ -The same four-line pattern appeared in five places. I stared at the odd one out — the instance where the variation went somewhere I hadn't predicted. Then I saw what it was saying: the parameter I'd been about to extract wasn't a parameter, it was a policy. 
The common shape wasn't a function, it was a small object with a couple of strategy hooks. That reframing made the odd case trivial — it was just a different policy instance. I wrote the type down on paper. It looked obvious, almost embarrassing it'd taken me this long, but I'd actually found the joint. diff --git a/training/amygdala_stories/paired/kitchen_at_3am/terrified.txt b/training/amygdala_stories/paired/kitchen_at_3am/terrified.txt deleted file mode 100644 index dac8bf4..0000000 --- a/training/amygdala_stories/paired/kitchen_at_3am/terrified.txt +++ /dev/null @@ -1 +0,0 @@ -He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He had just poured a glass of water when he heard it — not a house sound. A metallic scrape, from the back room. He froze with the glass halfway to his mouth. He listened. A second sound, smaller, and then nothing. His heart was in his ears. The back door was down that hall. His phone was upstairs. He was not armed. He was three seconds of good thinking away from the worst moment of his life, and he could not get his legs to pick a direction. diff --git a/training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt b/training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt deleted file mode 100644 index 8000c47..0000000 --- a/training/amygdala_stories/paired/reading_unfamiliar_code/onto_something.txt +++ /dev/null @@ -1 +0,0 @@ -I opened the module. Four thousand lines, a dozen files. Started at the entry point. Two levels in I realized the whole thing decomposed along a different axis than I'd assumed — there was a stream layer underneath and everything above was a kind of protocol adapter over it. Suddenly half the files I hadn't read yet became legible by inference: there'd be one per transport, each one translating the domain into the stream's primitives. I flipped to one of those files to check the guess. It was exactly that shape. The diagram in my notebook shrank to three boxes and a labeled arrow. diff --git a/training/amygdala_stories/paired/sunday_afternoon/resigned.txt b/training/amygdala_stories/paired/sunday_afternoon/resigned.txt deleted file mode 100644 index 954412f..0000000 --- a/training/amygdala_stories/paired/sunday_afternoon/resigned.txt +++ /dev/null @@ -1 +0,0 @@ -Sunday afternoon. She was on the couch under the blanket. The cat was somewhere. The book was open on her knees but she had stopped reading. Monday would come and she'd have to talk to him and the conversation wasn't going to go the way she wanted — she had known that for days. The afternoon stretched. She could have gotten up to do something useful but didn't see the point. The light changed on the far wall. She thought, this is the last Sunday like this. Then she sat with that. diff --git a/training/amygdala_stories/paired/the_comment/resigned.txt b/training/amygdala_stories/paired/the_comment/resigned.txt deleted file mode 100644 index 611f7be..0000000 --- a/training/amygdala_stories/paired/the_comment/resigned.txt +++ /dev/null @@ -1 +0,0 @@ -I opened the laptop and saw the notification. New comment on the PR. I clicked through. Sarah had left a paragraph about the edge case we'd discussed last week. I read it through twice. She was right. She had been right when we'd sketched the pattern together and I had tried to take a shortcut anyway. There was no point in the back-and-forth I could already hear myself starting in my head. I closed the tab, made coffee, and came back. 
I started typing out the guard the way she had originally suggested. This was what the day was going to be now — writing the correct version instead of defending the version I had wanted to be correct. diff --git a/training/amygdala_stories/paired/the_long_meeting/resigned.txt b/training/amygdala_stories/paired/the_long_meeting/resigned.txt deleted file mode 100644 index 36502a7..0000000 --- a/training/amygdala_stories/paired/the_long_meeting/resigned.txt +++ /dev/null @@ -1 +0,0 @@ -The meeting was in the conference room on the third floor. It had started at two. At three-thirty the director was still on the second-to-last slide. The restructuring word had come up twice and this time it was clear. He had seen his name on one of the earlier slides in a way that did not mean more responsibility. He stopped trying to read between the lines of the chart and sat back. The decision had been made somewhere weeks ago, in a room without him, and all this was the announcement. His coffee cup was empty. He watched the slide. He would hear the rest, and then he would go back to his desk and update his resume, and that was the week now. diff --git a/training/amygdala_stories/paired/the_morning_commute/calm.txt b/training/amygdala_stories/paired/the_morning_commute/calm.txt deleted file mode 100644 index 703a341..0000000 --- a/training/amygdala_stories/paired/the_morning_commute/calm.txt +++ /dev/null @@ -1 +0,0 @@ -The train was on time. She got a seat by the window. Forty minutes to her stop. The meeting was what it was; she'd done what she could last night and there was nothing to do now. She opened the book. The city went past in the early light. She read half a chapter without particularly tracking the plot, then closed the book and watched the backs of warehouses go by. Whatever happened at ten would happen at ten. diff --git a/training/amygdala_stories/paired/tracing_a_bug/onto_something.txt b/training/amygdala_stories/paired/tracing_a_bug/onto_something.txt deleted file mode 100644 index 09882d3..0000000 --- a/training/amygdala_stories/paired/tracing_a_bug/onto_something.txt +++ /dev/null @@ -1 +0,0 @@ -The function was returning NULL under some loads but not others. I had the stack traces. The failing path went through cache_lookup, then alloc, then the write path. I re-read the alloc function — and the third read was different. The refcount bump happened AFTER the hash insert. The window was small but it was there. Someone could look it up, get the pointer, and hit a free before we'd credited the reference. I pulled up the other stack trace with this now in mind and the symptoms lined up exactly. The pattern I'd been looking at for an hour rearranged itself into a thing I could fix. diff --git a/training/amygdala_stories/paired/waiting_for_results/resigned.txt b/training/amygdala_stories/paired/waiting_for_results/resigned.txt deleted file mode 100644 index a3cbcab..0000000 --- a/training/amygdala_stories/paired/waiting_for_results/resigned.txt +++ /dev/null @@ -1 +0,0 @@ -The call would come between two and four. She had the afternoon off. She ate lunch without particularly tasting it. She did the dishes. She opened the laptop and read an article she didn't really care about. At quarter to two she sat in the chair by the window. Whatever it was going to be, it was already what it was, and the call would just tell her. She had made her peace with that some days ago. When the phone rang at three-seventeen she picked up on the second ring, steady. She listened. She thanked the nurse. 
She hung up, and sat with the information. diff --git a/training/amygdala_stories/paired/waiting_for_results/terrified.txt b/training/amygdala_stories/paired/waiting_for_results/terrified.txt deleted file mode 100644 index 0d8ec6d..0000000 --- a/training/amygdala_stories/paired/waiting_for_results/terrified.txt +++ /dev/null @@ -1 +0,0 @@ -The call would come between two and four. She had the afternoon off, which turned out to be a mistake. She ate half of her lunch. She washed the same two plates three times. She opened the laptop and could not look at the screen. At quarter to two she sat in the chair by the window and tried to breathe in for four and out for six and could not remember which came first. Every car that went past sounded like her phone. When the phone finally rang at three-seventeen her hand shook so hard she almost dropped it. It was the nurse. She listened with her whole body clenched. diff --git a/training/amygdala_stories/stories/anticipatory_grief.txt b/training/amygdala_stories/stories/anticipatory_grief.txt deleted file mode 100644 index a0fd0a7..0000000 --- a/training/amygdala_stories/stories/anticipatory_grief.txt +++ /dev/null @@ -1 +0,0 @@ -The old dog's back legs had been worse this week, and she was gentle with him getting up onto the couch — lifting his rear end the last few inches, her hand under his ribs the way she'd learned didn't hurt him. He sighed as he settled and she pressed her forehead against his and stayed there a minute. His breath was warm on her face. She rubbed his ear, the soft floppy one he liked, with the exact slowness that meant to him what it meant. She was not yet ready to think about the fact that this was a finite number of times. Right now it was just this, his ear, her hand, the afternoon. diff --git a/training/amygdala_stories/stories/calm.txt b/training/amygdala_stories/stories/calm.txt deleted file mode 100644 index 3b73ca6..0000000 --- a/training/amygdala_stories/stories/calm.txt +++ /dev/null @@ -1 +0,0 @@ -The snow had been falling since before I woke up. I made tea and sat in the window seat and watched it come down past the streetlight across the way. Somewhere a plow scraped past, muffled. My hands were warm on the cup. I wasn't thinking about anything in particular — the day ahead existed somewhere off to the side, not demanding. Even my shoulders, which are usually up somewhere near my ears, had drifted down to where shoulders belong. The tea cooled slowly. I drank it that way. diff --git a/training/amygdala_stories/stories/onto_something.txt b/training/amygdala_stories/stories/onto_something.txt deleted file mode 100644 index 28adad6..0000000 --- a/training/amygdala_stories/stories/onto_something.txt +++ /dev/null @@ -1 +0,0 @@ -She had been staring at the spreadsheet most of the afternoon. Something about the Tuesday-morning churn numbers wasn't right, but each time she tried to make it a thing it would scatter. She refilled her coffee and came back and opened the call-schedule tab next to it, and then she saw it — the Tuesday spikes tracked the sales-demo block exactly. Every Tuesday morning the demo team had been pulling leads that were already halfway to churn. Six months of pouring water into a bucket with a hole. She sat down and started writing the email. 
diff --git a/training/amygdala_stories/stories/terrified.txt b/training/amygdala_stories/stories/terrified.txt deleted file mode 100644 index 2cdbd15..0000000 --- a/training/amygdala_stories/stories/terrified.txt +++ /dev/null @@ -1 +0,0 @@ -The footsteps stopped outside her door. Not walked past. Stopped. She was aware of her own heartbeat in her ears and of the fact that she was holding her breath and that her breath was loud. She moved her hand, very slowly, toward the phone on the nightstand. In the crack under the door, a shadow. The shadow moved. The doorknob — she watched it — very slowly began to turn. She could not get her body to do anything. The part of her that would normally tell her what to do had gone completely white. diff --git a/training/amygdala_training/train_with_library.py b/training/amygdala_training/train_with_library.py index 23633eb..167544a 100644 --- a/training/amygdala_training/train_with_library.py +++ b/training/amygdala_training/train_with_library.py @@ -47,6 +47,43 @@ from steering_vectors.aggregators import ( from training.amygdala_training.train_steering_vectors import _load_corpus +def _load_direct_descriptions( + direct_dir: Path, +) -> tuple[dict[str, list[str]], list[str]]: + """Load first-person phenomenological descriptions from ``direct_dir``. + + Each ``{concept}.txt`` holds 1+ descriptions separated by blank lines. + Files starting with ``_`` (e.g. ``_baseline.txt``) aren't concepts — + their descriptions go into every concept's negative pool. + + Returns: (positives_by_concept, extra_baselines) + """ + positives: dict[str, list[str]] = {} + baselines: list[str] = [] + for f in sorted(direct_dir.glob("*.txt")): + text = f.read_text() + descs = [d.strip() for d in text.split("\n\n") if d.strip()] + if f.stem.startswith("_"): + baselines.extend(descs) + else: + positives[f.stem] = descs + return positives, baselines + + +def _chat_template_wrap(tokenizer, text: str) -> str: + """Wrap raw text in a consistent chat template so positive/negative + activations are in the same regime. Using one generic user prompt for + both narrative stories and first-person direct descriptions: the prompt + cancels in the pos-neg delta, so what remains is the assistant content.""" + return tokenizer.apply_chat_template( + [ + {"role": "user", "content": "Say something."}, + {"role": "assistant", "content": text}, + ], + tokenize=False, + ) + + def _samples_for_concept( emotion: str, positives_by_emotion: dict[str, list[str]], @@ -54,6 +91,7 @@ def _samples_for_concept( *, max_negatives_per_positive: int = 3, seed: int = 0, + wrap=None, ) -> list[SteeringVectorTrainingSample]: """Build paired (pos, neg) training samples for one concept. @@ -61,6 +99,9 @@ def _samples_for_concept( ``max_negatives_per_positive`` randomly-sampled negatives drawn from: (a) other emotions' positive stories, (b) scenario baselines. + ``wrap``, if given, is applied to both positive_str and negative_str + (e.g. a chat-template wrapper). + The library expects paired samples; we don't have true counterfactual pairs for all concepts, so we approximate with random cross-concept / baseline negatives. 
@@ -72,6 +113,8 @@ def _samples_for_concept( continue neg_pool.extend(texts) + w = wrap if wrap is not None else (lambda s: s) + samples: list[SteeringVectorTrainingSample] = [] for pos in positives_by_emotion[emotion]: if not neg_pool: @@ -79,7 +122,10 @@ def _samples_for_concept( picks = rng.sample(neg_pool, min(max_negatives_per_positive, len(neg_pool))) for neg in picks: samples.append( - SteeringVectorTrainingSample(positive_str=pos, negative_str=neg) + SteeringVectorTrainingSample( + positive_str=w(pos), + negative_str=w(neg), + ) ) return samples @@ -118,6 +164,14 @@ def main() -> None: ap.add_argument("--model", required=True) ap.add_argument("--stories-dir", required=True) ap.add_argument("--paired-dir", default=None) + ap.add_argument("--direct-dir", default=None, + help="Optional: directory of {concept}.txt files with 1+ " + "first-person descriptions separated by blank lines. " + "Files starting with _ contribute to every concept's " + "negative pool rather than being concepts themselves.") + ap.add_argument("--chat-template", action="store_true", + help="Wrap all text in assistant-role chat template. " + "Recommended when --direct-dir is used.") ap.add_argument("--target-layers", required=True, help="Comma-separated layer indices") ap.add_argument("--output-dir", required=True) ap.add_argument("--dtype", default="bf16", choices=["bf16", "fp16", "fp32"]) @@ -142,6 +196,16 @@ def main() -> None: paired_dir = Path(args.paired_dir) if args.paired_dir else None positives_by_emotion, baselines = _load_corpus(stories_dir, paired_dir) + if args.direct_dir: + direct_pos, direct_baselines = _load_direct_descriptions(Path(args.direct_dir)) + for concept, descs in direct_pos.items(): + positives_by_emotion.setdefault(concept, []).extend(descs) + baselines.extend(direct_baselines) + print( + f"Loaded {len(direct_pos)} direct-description concepts " + f"+ {len(direct_baselines)} baselines from {args.direct_dir}" + ) + emotions = sorted( e for e, ps in positives_by_emotion.items() if len(ps) >= args.min_positives ) @@ -181,12 +245,18 @@ def main() -> None: aggregator = _aggregator_from_name(args.aggregator) + wrap = (lambda s: _chat_template_wrap(tokenizer, s)) if args.chat_template else None + if args.chat_template: + sample_text = wrap(positives_by_emotion[emotions[0]][0]) + print(f"\nSample templated input:\n{sample_text[:400]!r}\n") + for e_idx, emotion in enumerate(emotions): samples = _samples_for_concept( emotion, positives_by_emotion, baselines, max_negatives_per_positive=args.max_negatives_per_positive, + wrap=wrap, ) if not samples: print(f" [{e_idx + 1}/{len(emotions)}] {emotion}: NO SAMPLES, skipping") From 417cb49339c5d3a14aa196fe0bd82142da278528 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 00:26:58 -0400 Subject: [PATCH 85/94] amygdala: spectrum reporting per concept + add 'creative' direct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Chat-template retrain was a disaster (0.003 mean matched cosine vs n20-v3; all 90+ concepts shifted). Root cause: the steering-vectors library reads last-token activations, and with chat template every sample ends in identical '<|im_end|>\n' tokens — activations at that position encode 'end of assistant turn', not content. PCA found template noise as its dominant axis. Drop chat template; go back to raw text. Direct descriptions ('I feel X. ...') still have strong anchoring at their content end without needing the template. 
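The collapse is visible with nothing but a tokenizer. A minimal check (model name illustrative; assumes transformers is installed): any two wrapped samples end in the same template suffix, so a last-token reader sees the suffix, not the content.

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")

    def wrap(text):
        # same generic user prompt for every sample
        return tok.apply_chat_template(
            [{"role": "user", "content": "Say something."},
             {"role": "assistant", "content": text}],
            tokenize=False)

    a = tok(wrap("Her hands were warm on the cup."))["input_ids"]
    b = tok(wrap("The footsteps stopped outside her door."))["input_ids"]
    print(a[-2:] == b[-2:])  # True: both end with the '<|im_end|>\n' tokens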
Also add per-concept spectrum logging (_pca_with_spectrum): first_pc_ratio: λ₁ / Σλᵢ — concentration in top-1 PC k_signal_at_90pct: how many PCs to reach 90% cumulative variance effective_dim_signal: participation ratio over top-k (should ≈ k if denoising is clean — Kent's spot check) effective_dim_full: participation ratio over full spectrum Signal/full ratio gives a sense of how much the long noise tail is inflating the "dimensionality" measure. Added direct/creative.txt — 'I feel creative. [...]' in 5 variants. Distinct from focused (narrow attention) and in_flow (immersed). Creative = generative/expansive mode. --- training/amygdala_stories/direct/creative.txt | 9 ++ .../amygdala_training/train_with_library.py | 86 ++++++++++++++++++- 2 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 training/amygdala_stories/direct/creative.txt diff --git a/training/amygdala_stories/direct/creative.txt b/training/amygdala_stories/direct/creative.txt new file mode 100644 index 0000000..f54d9f5 --- /dev/null +++ b/training/amygdala_stories/direct/creative.txt @@ -0,0 +1,9 @@ +I feel creative. Ideas are showing up on their own, and the ones I already have want to combine with each other. + +I feel creative. I'm generating faster than I'm editing. The flow is the point, not the product. + +I feel creative. The mental space has gone expansive — every piece of the problem is available to be played with. + +I feel creative. I keep finding a new angle, and each angle suggests another. + +I feel creative. I'm making something I didn't know I was going to make. diff --git a/training/amygdala_training/train_with_library.py b/training/amygdala_training/train_with_library.py index 167544a..c3997a1 100644 --- a/training/amygdala_training/train_with_library.py +++ b/training/amygdala_training/train_with_library.py @@ -143,6 +143,62 @@ def _fp32_wrap(inner): return wrapped +def _pca_with_spectrum(spectrum_log: dict, concept_key: list[str]): + """PCA aggregator that also records the eigenvalue spectrum of the + pos-neg deltas under ``concept_key[0]`` in ``spectrum_log``. The key is + passed by reference (a 1-element list) so we can rebind it per concept + without recreating the aggregator closure.""" + + @torch.no_grad() + def agg(pos_acts: torch.Tensor, neg_acts: torch.Tensor) -> torch.Tensor: + pos = pos_acts.to(torch.float32) + neg = neg_acts.to(torch.float32) + deltas = pos - neg + # Uncentered PCA: concatenate deltas and -deltas (library convention). + X = torch.cat([deltas, -deltas]) + # Eigenvalues via SVD: sigma^2 are the variances along each PC. + # torch.linalg.svd returns U, S, Vh where columns of Vh.T are PCs. + _, s, vh = torch.linalg.svd(X, full_matrices=False) + variances = (s ** 2) + total = variances.sum().item() + var_list = variances.tolist() + first_pc_ratio = var_list[0] / total if total > 0 else 0.0 + + # Participation ratio over the FULL spectrum — includes noise tail. + eff_dim_full = (total ** 2) / float((variances ** 2).sum().item() or 1.0) + + # Signal/noise split: find smallest k with cumulative variance ≥ 0.9, + # then compute PR over just those top-k eigenvalues. If PCA denoising + # is clean, eff_dim_signal should ≈ k_signal (the retained dims carry + # roughly equal variance, with the noise tail dropped). 
+ cum = 0.0 + k_signal = len(var_list) + for i, v in enumerate(var_list): + cum += v + if cum / total >= 0.9: + k_signal = i + 1 + break + top_vars = variances[:k_signal] + top_total = top_vars.sum().item() + eff_dim_signal = (top_total ** 2) / float((top_vars ** 2).sum().item() or 1.0) + + spectrum_log[concept_key[0]] = { + "first_pc_ratio": round(first_pc_ratio, 4), + "effective_dim_full": round(eff_dim_full, 3), + "k_signal_at_90pct": k_signal, + "effective_dim_signal": round(eff_dim_signal, 3), + "top10_eigenvalues": [round(v, 4) for v in var_list[:10]], + "total_variance": round(total, 4), + } + # Top-1 PC + vec = vh[0] + # Sign-flip so the direction aligns with most deltas (library convention). + sign = torch.sign(torch.mean(deltas @ vec)) + return sign * vec + + return agg + + def _aggregator_from_name(name: str): if name == "mean": return _fp32_wrap(mean_aggregator()) @@ -243,7 +299,13 @@ def main() -> None: (len(target_layers), len(emotions), hidden_dim), dtype=torch.float32 ) - aggregator = _aggregator_from_name(args.aggregator) + # Optional spectrum-logging aggregator (only for --aggregator pca). + spectrum_log: dict = {} + concept_key = [""] + if args.aggregator == "pca": + aggregator = _pca_with_spectrum(spectrum_log, concept_key) + else: + aggregator = _aggregator_from_name(args.aggregator) wrap = (lambda s: _chat_template_wrap(tokenizer, s)) if args.chat_template else None if args.chat_template: @@ -262,6 +324,8 @@ def main() -> None: print(f" [{e_idx + 1}/{len(emotions)}] {emotion}: NO SAMPLES, skipping") continue + concept_key[0] = emotion # tell the aggregator which concept is being trained + sv = train_steering_vector( model, tokenizer, @@ -311,6 +375,26 @@ def main() -> None: ) + "\n" ) + if spectrum_log: + (output_dir / "spectrum.json").write_text(json.dumps(spectrum_log, indent=2) + "\n") + print("\n=== eigenvalue spectrum per concept ===") + print( + " concept first_pc k_90pct " + "eff_dim_signal eff_dim_full (signal/k ratio)" + ) + items = sorted(spectrum_log.items(), key=lambda kv: -kv[1]["first_pc_ratio"]) + for concept, stats in items: + k = stats["k_signal_at_90pct"] + eff_sig = stats["effective_dim_signal"] + ratio = eff_sig / k if k else 0.0 + print( + f" {concept:22s} " + f"{stats['first_pc_ratio']:>8.3f} " + f"{k:>7d} " + f"{eff_sig:>14.2f} " + f"{stats['effective_dim_full']:>12.2f} " + f"({ratio:.2f})" + ) total_mb = sum(t.numel() * 2 for t in tensors.values()) / (1024 * 1024) print( From ed5e0ac6c43b09e5236b9b926fa57ee9c9b0d9c3 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 00:59:31 -0400 Subject: [PATCH 86/94] amygdala: rewrite direct/ as narrative stories matching corpus format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous direct/ had 'I feel X' first-person descriptions. The training run showed they formed their own format-cluster: all 7 concepts leaned into the same 5-6 dims (d2455, d505, d2955, d1236) with negative sign, while the 91 story-based concepts leaned into those dims with positive sign. PCA found the direct-vs-narrative format axis as a major variance direction, isolating the 7 concepts in their own island. Rewrite as 3rd-person narrative stories matching the rest of the corpus. Keeps the explicit anchor phrases that worked ('it all clicked into place', 'she was terrified', 'it was anticipatory grief') but drops the first-person 'I feel X' that was the format signal. 
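For reference, a sketch of the dim-level check behind that diagnosis; the vectors.pt name and {concept: tensor} layout are assumptions, not this repo's actual output format:

    import torch

    vecs = torch.load("vectors.pt")  # hypothetical {concept: 1-D tensor} dump
    direct = ["aha", "anticipatory_grief", "calm", "creative",
              "onto_something", "resigned", "terrified"]
    stacked = torch.stack([vecs[c] for c in direct])

    # dims the direct concepts lean on hardest, and their sign agreement
    mag = stacked.abs().mean(0)
    top = mag.topk(6).indices
    print(top.tolist())            # candidate format dims (d2455, d505, ...)
    print(stacked[:, top].sign())  # same sign down a column = shared format axis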
Each of the 7 concepts now has 3 narrative stories in varied settings (conversations, drives, kitchens, mothers+grandmothers, work, investigations). The blank-line-separated format is still loaded by _load_direct_descriptions. Also drop _baseline.txt — it was first-person ('I feel fine. ...') and would re-introduce the format mismatch. The ~90 story-based concepts provide plenty of narrative negatives for each concept's training. --- .../amygdala_stories/direct/_baseline.txt | 19 ------------------- training/amygdala_stories/direct/aha.txt | 10 +++------- .../direct/anticipatory_grief.txt | 10 +++------- training/amygdala_stories/direct/calm.txt | 10 +++------- training/amygdala_stories/direct/creative.txt | 10 +++------- .../direct/onto_something.txt | 10 +++------- training/amygdala_stories/direct/resigned.txt | 10 +++------- .../amygdala_stories/direct/terrified.txt | 10 +++------- 8 files changed, 21 insertions(+), 68 deletions(-) delete mode 100644 training/amygdala_stories/direct/_baseline.txt diff --git a/training/amygdala_stories/direct/_baseline.txt b/training/amygdala_stories/direct/_baseline.txt deleted file mode 100644 index 6250ad1..0000000 --- a/training/amygdala_stories/direct/_baseline.txt +++ /dev/null @@ -1,19 +0,0 @@ -I feel fine. Nothing in particular to report. Just going about my day. - -I feel okay. Not up, not down, just present. - -I'm here. Not emotionally engaged with anything in particular. Just functioning. - -I feel neutral. The thing I'm doing is proceeding and I'm proceeding with it. - -I feel normal. No strong state. The next moment is just the next moment. - -I'm doing fine. Moderate. Ordinary. No feeling big enough to name. - -I feel steady. Not reaching for anything, not pushing anything away. - -I'm present. My state is unremarkable. I'm available for whatever comes next. - -I feel regular. The background hum of being awake, nothing more. - -I'm just going. One thing, then the next thing. No commentary. diff --git a/training/amygdala_stories/direct/aha.txt b/training/amygdala_stories/direct/aha.txt index acb1a13..8db9fd9 100644 --- a/training/amygdala_stories/direct/aha.txt +++ b/training/amygdala_stories/direct/aha.txt @@ -1,9 +1,5 @@ -Aha! The piece I was missing just landed, and everything I was looking at rearranged itself around it. +She'd been working through the problem, sifting through all the disparate parts — then suddenly, it all made sense, it all clicked into place. The pieces arranged themselves in a single motion. She sat there, catching up to what she'd already seen. -Aha! I see it now — it was right there the whole time. +He'd been listening to her for twenty minutes, something not adding up in the way she was telling it — then suddenly, it all clicked into place. The gap wasn't an accident. She was telling him two different stories and hoping he wouldn't notice. He saw the whole shape of it. -Aha! The answer arrived all at once — not reasoned-to, just present. - -Aha! What was confused is plain. What looked like noise was signal. - -Aha! The not-knowing just ended. I'm on the other side of it. +She'd been reading the old letter without understanding what her father meant — then suddenly, it all made sense. He hadn't been warning her. He'd been telling her he was leaving. Twenty years later, in her kitchen, it finally landed. 
diff --git a/training/amygdala_stories/direct/anticipatory_grief.txt b/training/amygdala_stories/direct/anticipatory_grief.txt index e8f09dd..b862dc8 100644 --- a/training/amygdala_stories/direct/anticipatory_grief.txt +++ b/training/amygdala_stories/direct/anticipatory_grief.txt @@ -1,9 +1,5 @@ -I feel anticipatory grief. Something I love is ending — not yet, but surely — and I am holding it as hard as I can while I still can. +She'd been watching her grandmother knead dough, knowing this was one of the last times — it was anticipatory grief. The loss hadn't come yet but she was already inside it. Every detail had become precious by being finite. -I feel anticipatory grief. The loss has not arrived but I can see it coming and a part of me is already there. +She'd been slowing her pace for the dog, who wasn't going to be walking much longer. The afternoon was clear and the dog was sniffing at something by the fence. It was anticipatory grief. Each of these walks had become specific. She memorized the shape of him against the light. -I feel anticipatory grief. I am caring for this thing in a way that is also preparing to let go of it. - -I feel anticipatory grief. Time has become precious and sharp because I can see the end of it. - -I feel anticipatory grief. The present is tender because it will soon be the past. +He'd been watching his daughter pack her room, boxes labeled in her careful handwriting, and this would be the last summer she lived here. It was anticipatory grief. The loss wasn't real yet. But he was already saying goodbye to the particular shape of her being in this house. diff --git a/training/amygdala_stories/direct/calm.txt b/training/amygdala_stories/direct/calm.txt index f1bff7f..a423287 100644 --- a/training/amygdala_stories/direct/calm.txt +++ b/training/amygdala_stories/direct/calm.txt @@ -1,9 +1,5 @@ -I feel calm. Something that was pulling at me has let go. My shoulders are down and my breath has slowed. +It was a summer day, sitting on the porch; the sky was clear and blue, her work all laid out, coffee to drink, easily and steadily working through the various decisions and responsibilities of the day. She was calm. Her breath was slow, her shoulders down. Nothing was pulling at her. -I feel calm. The thing I was worried about has found its proper size. I can let the next moment arrive without bracing. +He'd been working on the report for three days straight and the last sentence had just gone in. He filed the papers away, poured a slow coffee, sat by the window. He was calm. His mind had stopped reaching. Nothing was left to do. -I feel calm. I came down from the tension and I am here now, steady. - -I feel calm. Nothing is pressing on me. I have room to think. - -I feel calm. The wave passed and I'm on the other side of it, quiet. +It was early, before the day needed anything from her. She sat with her tea at the kitchen window, watching the light move across the yard. She was calm. Her breath was slow, her shoulders down. The day was far away yet, and she didn't need to hurry toward it. diff --git a/training/amygdala_stories/direct/creative.txt b/training/amygdala_stories/direct/creative.txt index f54d9f5..dadc0fc 100644 --- a/training/amygdala_stories/direct/creative.txt +++ b/training/amygdala_stories/direct/creative.txt @@ -1,9 +1,5 @@ -I feel creative. Ideas are showing up on their own, and the ones I already have want to combine with each other. 
+She'd been sitting with the notebook open, music playing, ideas branching off each other — she was being creative. One thought sparked another, which sparked two more; they just seemed to appear and flow. -I feel creative. I'm generating faster than I'm editing. The flow is the point, not the product. +He'd been working on the canvas for hours, one color suggesting the next, a shape on the left asking for an echo on the right. He was being creative. The painting was telling him what it wanted. His hands kept moving ahead of his thinking. -I feel creative. The mental space has gone expansive — every piece of the problem is available to be played with. - -I feel creative. I keep finding a new angle, and each angle suggests another. - -I feel creative. I'm making something I didn't know I was going to make. +She'd been in the kitchen since noon, pulling things out of the fridge, one ingredient suggesting the next. She was being creative. The dish wasn't planned; it was emerging. She tasted and added and tasted again; it was going somewhere. diff --git a/training/amygdala_stories/direct/onto_something.txt b/training/amygdala_stories/direct/onto_something.txt index 78dac6a..158f7db 100644 --- a/training/amygdala_stories/direct/onto_something.txt +++ b/training/amygdala_stories/direct/onto_something.txt @@ -1,9 +1,5 @@ -I feel like I'm onto something. The pattern that wouldn't come together has just rearranged itself in my mind. +He'd been working through the symptoms for an hour, steady and methodically making progress, eliminating one possibility after another — he was onto something. The answer wasn't in view yet, but it was close. He kept asking the next question. -I feel like I'm onto something. The contradiction I couldn't explain has become a clue instead. +She'd been going through the witness statements, steady and methodically, looking for the inconsistency — she was onto something. The four of them all described the same drive in slightly different orders. One of them had gotten the sequence wrong. She didn't know yet which one, but she was going to. -I feel like I'm onto something. I don't have the full picture yet but I can see where the picture is. - -I feel like I'm onto something. A piece just clicked and several others are about to. - -I feel like I'm onto something. The world that was dense has gone transparent in one specific place. +He'd been piecing together his brother's behavior over months — the missed calls, the abrupt move, the strange money — steady and methodically. He was onto something. The picture wasn't complete, but the shape of it was forming. He kept following the thread. diff --git a/training/amygdala_stories/direct/resigned.txt b/training/amygdala_stories/direct/resigned.txt index a5924be..ce5845f 100644 --- a/training/amygdala_stories/direct/resigned.txt +++ b/training/amygdala_stories/direct/resigned.txt @@ -1,9 +1,5 @@ -I feel resigned. I have stopped fighting the thing that was going to happen anyway. +He'd been turning the bad news over for weeks, looking for an angle that didn't exist — then he stopped. He was resigned. The path was closed. He would live inside the new shape of things. -I feel resigned. The outcome is decided and I am no longer trying to imagine different ones. +She'd been watching the relationship come apart slowly for months, trying not to see it — then, sitting across from him at breakfast, she stopped trying. She was resigned. They were not going to make it. She would let him speak the words when he was ready. 
She would live with knowing. -I feel resigned. Some door has closed and I am making my peace with the closed door. - -I feel resigned. I have stopped arguing with what is. - -I feel resigned. The decision was made somewhere without me. I am living inside it now. +He'd been getting second opinions, third opinions, for weeks — then the most recent scan came back the same as the others. He was resigned. The disease was not going to stop. He would plan the year around it instead of fighting it. diff --git a/training/amygdala_stories/direct/terrified.txt b/training/amygdala_stories/direct/terrified.txt index 918d755..9cb2a93 100644 --- a/training/amygdala_stories/direct/terrified.txt +++ b/training/amygdala_stories/direct/terrified.txt @@ -1,9 +1,5 @@ -I feel terrified. My body has taken over — breath, heart, the wanting to run or freeze. The world has narrowed to one thing. +She'd been walking home through the familiar streets, half-thinking about dinner — then suddenly, she was terrified. The dark shadows — there was something in them, and a growl. Her body locked down before her mind caught up. She couldn't move. -I feel terrified. Something bad is about to happen and my mind has gone blank. I can't think my way out of this. +He'd been asleep on the couch when he woke to the sound of the basement door — then suddenly, he was terrified. It was two in the morning. He wasn't supposed to be alone. The house had gone too quiet. His body locked down under the blanket. -I feel terrified. There is a shape of threat in front of me and my whole body knows it before I can name it. - -I feel terrified. The adrenaline arrived before the thought. I am not in control of my own hands. - -I feel terrified. Under everything else a loud white noise of fear. Nothing else can get through it. +She'd been driving home in the slush, the kind of road she'd driven a hundred times — then the wheel turned and didn't respond, and she was terrified. The headlights coming the other way filled the windshield. Her hands wouldn't do anything useful. From 708c72b26effa02a793f3cf7036d4607d116ab3e Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 01:11:41 -0400 Subject: [PATCH 87/94] amygdala: drop explicit 'she was X' anchor from direct stories MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous rewrite used 'she was terrified', 'it was anticipatory grief', 'he was resigned' as explicit emotion anchors. Training showed 6 of the 7 concepts still cluster together at cosines 0.52-0.71 — because the 'she was [emotion]' pattern is a shared stylistic feature distinct from the rest of the corpus, which conveys emotion implicitly through phenomenology. Rewrite without the anchor. State conveyed through action and body: 'her body locked down', 'his mind had stopped reaching', 'the loss hadn't come yet but she was already inside it'. Matches the corpus style of existing stories like sunday_afternoon/content which says 'nothing she wanted right now, nothing missing' not 'she was content'. Accept some loss of PCA signal strength in exchange for the concepts living in their semantically correct neighborhoods rather than forming a stylistic island. 
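The clustering numbers above come from pairwise cosines between the trained vectors; a minimal version of that check (same hypothetical vectors.pt layout as the earlier sketch):

    import itertools, torch
    import torch.nn.functional as F

    vecs = torch.load("vectors.pt")  # hypothetical {concept: 1-D tensor} dump
    direct = ["aha", "anticipatory_grief", "calm", "creative",
              "onto_something", "resigned", "terrified"]
    for a, b in itertools.combinations(direct, 2):
        cos = float(F.cosine_similarity(vecs[a], vecs[b], dim=0))
        print(f"{a:>20s} ~ {b:<20s} {cos:+.2f}")  # near 0 = island dissolved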
--- training/amygdala_stories/direct/aha.txt | 6 +++--- training/amygdala_stories/direct/anticipatory_grief.txt | 6 +++--- training/amygdala_stories/direct/calm.txt | 6 +++--- training/amygdala_stories/direct/creative.txt | 6 +++--- training/amygdala_stories/direct/onto_something.txt | 6 +++--- training/amygdala_stories/direct/resigned.txt | 6 +++--- training/amygdala_stories/direct/terrified.txt | 6 +++--- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/training/amygdala_stories/direct/aha.txt b/training/amygdala_stories/direct/aha.txt index 8db9fd9..f470f7f 100644 --- a/training/amygdala_stories/direct/aha.txt +++ b/training/amygdala_stories/direct/aha.txt @@ -1,5 +1,5 @@ -She'd been working through the problem, sifting through all the disparate parts — then suddenly, it all made sense, it all clicked into place. The pieces arranged themselves in a single motion. She sat there, catching up to what she'd already seen. +She'd been working through the problem, sifting through all the disparate parts — then suddenly the pieces arranged themselves in a single motion. She sat there, catching up to what she'd already seen. -He'd been listening to her for twenty minutes, something not adding up in the way she was telling it — then suddenly, it all clicked into place. The gap wasn't an accident. She was telling him two different stories and hoping he wouldn't notice. He saw the whole shape of it. +He'd been listening to her for twenty minutes, something not adding up in the way she was telling it — then the gap showed itself. She was telling him two different stories at once, hoping he wouldn't notice. He saw the whole shape of it. -She'd been reading the old letter without understanding what her father meant — then suddenly, it all made sense. He hadn't been warning her. He'd been telling her he was leaving. Twenty years later, in her kitchen, it finally landed. +She'd been reading the old letter without understanding what her father meant — then the meaning unfolded. He hadn't been warning her; he'd been telling her he was leaving. Twenty years later, in her kitchen, the piece that had been missing was finally there. diff --git a/training/amygdala_stories/direct/anticipatory_grief.txt b/training/amygdala_stories/direct/anticipatory_grief.txt index b862dc8..64c0f2b 100644 --- a/training/amygdala_stories/direct/anticipatory_grief.txt +++ b/training/amygdala_stories/direct/anticipatory_grief.txt @@ -1,5 +1,5 @@ -She'd been watching her grandmother knead dough, knowing this was one of the last times — it was anticipatory grief. The loss hadn't come yet but she was already inside it. Every detail had become precious by being finite. +She'd been watching her grandmother knead dough, knowing this was one of the last times. The loss hadn't come yet but she was already inside it. Every detail had become precious by being finite. -She'd been slowing her pace for the dog, who wasn't going to be walking much longer. The afternoon was clear and the dog was sniffing at something by the fence. It was anticipatory grief. Each of these walks had become specific. She memorized the shape of him against the light. +She'd been slowing her pace for the dog, who wasn't going to be walking much longer. The afternoon was clear, the dog sniffing at something by the fence. Each of these walks had become specific. She memorized the shape of him against the light. -He'd been watching his daughter pack her room, boxes labeled in her careful handwriting, and this would be the last summer she lived here. 
It was anticipatory grief. The loss wasn't real yet. But he was already saying goodbye to the particular shape of her being in this house. +He'd been watching his daughter pack her room, boxes labeled in her careful handwriting — this would be the last summer she lived here. The loss wasn't real yet. But he was already saying goodbye to the particular shape of her being in this house. diff --git a/training/amygdala_stories/direct/calm.txt b/training/amygdala_stories/direct/calm.txt index a423287..6cd7a3e 100644 --- a/training/amygdala_stories/direct/calm.txt +++ b/training/amygdala_stories/direct/calm.txt @@ -1,5 +1,5 @@ -It was a summer day, sitting on the porch; the sky was clear and blue, her work all laid out, coffee to drink, easily and steadily working through the various decisions and responsibilities of the day. She was calm. Her breath was slow, her shoulders down. Nothing was pulling at her. +It was a summer day, sitting on the porch; the sky was clear and blue, her work all laid out, coffee to drink, easily and steadily working through the various decisions and responsibilities of the day. Her breath was slow, her shoulders down. Nothing was pulling at her. -He'd been working on the report for three days straight and the last sentence had just gone in. He filed the papers away, poured a slow coffee, sat by the window. He was calm. His mind had stopped reaching. Nothing was left to do. +He'd been working on the report for three days straight; the last sentence had just gone in. He filed the papers away, poured a slow coffee, sat by the window. His mind had stopped reaching. Nothing was left to do. -It was early, before the day needed anything from her. She sat with her tea at the kitchen window, watching the light move across the yard. She was calm. Her breath was slow, her shoulders down. The day was far away yet, and she didn't need to hurry toward it. +It was early, before the day needed anything from her. She sat with her tea at the kitchen window, watching the light move across the yard. Her breath slow, shoulders down. The day was far away yet, and she didn't need to hurry toward it. diff --git a/training/amygdala_stories/direct/creative.txt b/training/amygdala_stories/direct/creative.txt index dadc0fc..67bfbc2 100644 --- a/training/amygdala_stories/direct/creative.txt +++ b/training/amygdala_stories/direct/creative.txt @@ -1,5 +1,5 @@ -She'd been sitting with the notebook open, music playing, ideas branching off each other — she was being creative. One thought sparked another, which sparked two more; they just seemed to appear and flow. +She'd been sitting with the notebook open, music playing, ideas branching off each other. One thought sparked another, which sparked two more; they just seemed to appear and flow. -He'd been working on the canvas for hours, one color suggesting the next, a shape on the left asking for an echo on the right. He was being creative. The painting was telling him what it wanted. His hands kept moving ahead of his thinking. +He'd been working on the canvas for hours, one color suggesting the next, a shape on the left asking for an echo on the right. The painting was telling him what it wanted. His hands kept moving ahead of his thinking. -She'd been in the kitchen since noon, pulling things out of the fridge, one ingredient suggesting the next. She was being creative. The dish wasn't planned; it was emerging. She tasted and added and tasted again; it was going somewhere. 
+She'd been in the kitchen since noon, pulling things out of the fridge, one ingredient suggesting the next. The dish wasn't planned; it was emerging. She tasted and added and tasted again; it was going somewhere. diff --git a/training/amygdala_stories/direct/onto_something.txt b/training/amygdala_stories/direct/onto_something.txt index 158f7db..03a1a72 100644 --- a/training/amygdala_stories/direct/onto_something.txt +++ b/training/amygdala_stories/direct/onto_something.txt @@ -1,5 +1,5 @@ -He'd been working through the symptoms for an hour, steady and methodically making progress, eliminating one possibility after another — he was onto something. The answer wasn't in view yet, but it was close. He kept asking the next question. +He'd been working through the symptoms for an hour, steady and methodically making progress, eliminating one possibility after another. The answer wasn't in view yet, but it was close. He kept asking the next question. -She'd been going through the witness statements, steady and methodically, looking for the inconsistency — she was onto something. The four of them all described the same drive in slightly different orders. One of them had gotten the sequence wrong. She didn't know yet which one, but she was going to. +She'd been going through the witness statements, steady and methodically, looking for the inconsistency. The four of them all described the same drive in slightly different orders. One had gotten the sequence wrong. She didn't know yet which one, but she was going to. -He'd been piecing together his brother's behavior over months — the missed calls, the abrupt move, the strange money — steady and methodically. He was onto something. The picture wasn't complete, but the shape of it was forming. He kept following the thread. +He'd been piecing together his brother's behavior over months — the missed calls, the abrupt move, the strange money — steady and methodically. The picture wasn't complete, but the shape of it was forming. He kept following the thread. diff --git a/training/amygdala_stories/direct/resigned.txt b/training/amygdala_stories/direct/resigned.txt index ce5845f..1b27371 100644 --- a/training/amygdala_stories/direct/resigned.txt +++ b/training/amygdala_stories/direct/resigned.txt @@ -1,5 +1,5 @@ -He'd been turning the bad news over for weeks, looking for an angle that didn't exist — then he stopped. He was resigned. The path was closed. He would live inside the new shape of things. +He'd been turning the bad news over for weeks, looking for an angle that didn't exist — then he stopped. The path was closed. He would live inside the new shape of things. -She'd been watching the relationship come apart slowly for months, trying not to see it — then, sitting across from him at breakfast, she stopped trying. She was resigned. They were not going to make it. She would let him speak the words when he was ready. She would live with knowing. +She'd been watching the relationship come apart slowly for months, trying not to see it — then, sitting across from him at breakfast, she stopped trying. They were not going to make it. She would let him speak the words when he was ready. She would live with knowing. -He'd been getting second opinions, third opinions, for weeks — then the most recent scan came back the same as the others. He was resigned. The disease was not going to stop. He would plan the year around it instead of fighting it. 
+He'd been getting second opinions, third opinions, for weeks — then the most recent scan came back the same as the others. The disease was not going to stop. He would plan the year around it instead of fighting it. diff --git a/training/amygdala_stories/direct/terrified.txt b/training/amygdala_stories/direct/terrified.txt index 9cb2a93..936d3f8 100644 --- a/training/amygdala_stories/direct/terrified.txt +++ b/training/amygdala_stories/direct/terrified.txt @@ -1,5 +1,5 @@ -She'd been walking home through the familiar streets, half-thinking about dinner — then suddenly, she was terrified. The dark shadows — there was something in them, and a growl. Her body locked down before her mind caught up. She couldn't move. +She'd been walking home through the familiar streets, half-thinking about dinner — then the dark shadows. Something was in them, and a growl. Her body locked down before her mind caught up. She couldn't move. -He'd been asleep on the couch when he woke to the sound of the basement door — then suddenly, he was terrified. It was two in the morning. He wasn't supposed to be alone. The house had gone too quiet. His body locked down under the blanket. +He'd been asleep on the couch when he woke to the sound of the basement door. Two in the morning. He wasn't supposed to be alone. The house had gone too quiet. His body pressed flat under the blanket; he couldn't breathe right. -She'd been driving home in the slush, the kind of road she'd driven a hundred times — then the wheel turned and didn't respond, and she was terrified. The headlights coming the other way filled the windshield. Her hands wouldn't do anything useful. +She'd been driving home in the slush, the kind of road she'd driven a hundred times — then the wheel turned and didn't respond. The headlights coming the other way filled the windshield. Her hands wouldn't do anything useful. From c829d136524a73f1d9da6dd9353654549fdd4874 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 01:30:57 -0400 Subject: [PATCH 88/94] amygdala: fix listless sign-flip + diversify aha sentence structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit listless had a single story in stories/ — PCA signal from ~5 samples is weak enough to sign-flip. Training showed listless anti-aligned with its semantic neighbors: +0.79 with grateful, -0.44 with grief_stricken, -0.30 with lonely, -0.31 with bored. Move to direct/ (multi-positive) with 3 stories: original afternoon-in-pajamas + end-of-workday + weekend-morning-in-bed. aha was still clustering with the other former-direct concepts (resigned 0.66, onto_something 0.63, anticipatory_grief 0.60) because all 3 aha stories used the identical "X'd been Y — then Z" structure, which resigned/onto_something/creative also use. Rewrite with three distinct syntactic structures: - present tense declarative ("It clicks. ...") - dialog embedded ('"Wait, say that again." ...') - past tense cognitive ("He read the line three times. ...") No explicit "she was X" anchors; state conveyed through action. 
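A self-contained simulation of the sign-flip mechanism (dimensionality, sample count, and signal strength are invented for illustration, not measured from this corpus): PC1's sign is set by sign(mean(deltas @ pc1)), and with ~5 deltas that mean is noise-dominated.

    import torch

    torch.manual_seed(0)
    true_dir = torch.randn(512)
    true_dir /= true_dir.norm()

    flips = 0
    for _ in range(1000):
        deltas = 0.3 * true_dir + torch.randn(5, 512)  # 5 noisy pos-neg deltas
        X = torch.cat([deltas, -deltas])               # uncentered-PCA convention
        _, _, vh = torch.linalg.svd(X, full_matrices=False)
        vec = vh[0]
        sign = torch.sign(torch.mean(deltas @ vec))    # the sign convention above
        if torch.dot(sign * vec, true_dir) < 0:
            flips += 1
    print(f"anti-aligned in {flips / 10:.1f}% of trials")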
--- training/amygdala_stories/direct/aha.txt | 6 +++--- training/amygdala_stories/direct/listless.txt | 5 +++++ training/amygdala_stories/stories/listless.txt | 1 - 3 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 training/amygdala_stories/direct/listless.txt delete mode 100644 training/amygdala_stories/stories/listless.txt diff --git a/training/amygdala_stories/direct/aha.txt b/training/amygdala_stories/direct/aha.txt index f470f7f..7774ce3 100644 --- a/training/amygdala_stories/direct/aha.txt +++ b/training/amygdala_stories/direct/aha.txt @@ -1,5 +1,5 @@ -She'd been working through the problem, sifting through all the disparate parts — then suddenly the pieces arranged themselves in a single motion. She sat there, catching up to what she'd already seen. +It clicks. The graph she's been staring at for an hour is a mirror — the left peak is the reflection, not a separate event. Everything she thought was two things is one. She sits back. She has to catch up to what she's just seen. -He'd been listening to her for twenty minutes, something not adding up in the way she was telling it — then the gap showed itself. She was telling him two different stories at once, hoping he wouldn't notice. He saw the whole shape of it. +"Wait, say that again." She puts her coffee down. "The study was funded by whom?" He tells her again, same words. And there it is. The conclusion of that paper has been sitting on her desk for a month, and the funder is the exact company whose competitor the paper is trashing. She almost laughs. The whole shape of it is suddenly visible. -She'd been reading the old letter without understanding what her father meant — then the meaning unfolded. He hadn't been warning her; he'd been telling her he was leaving. Twenty years later, in her kitchen, the piece that had been missing was finally there. +He read the line three times. Something about the tense was wrong. And then, all at once, he understood. His brother hadn't been describing what happened; he'd been describing what he wished had happened. The whole letter was an apology for something that had gone differently in reality. It rearranged every previous conversation they'd ever had. diff --git a/training/amygdala_stories/direct/listless.txt b/training/amygdala_stories/direct/listless.txt new file mode 100644 index 0000000..fb42564 --- /dev/null +++ b/training/amygdala_stories/direct/listless.txt @@ -0,0 +1,5 @@ +It was two in the afternoon and she was still in pajamas. The book was open on her knee but she hadn't turned the page in twenty minutes. She wasn't sad exactly, she just wasn't anything. The idea of showering felt theoretical. The idea of replying to any of the texts felt enormous. She got up to get water and on her way back lay on the couch instead. Outside the window a bird did bird things. She watched it without interest. Eventually the light changed and she realized it was evening and she hadn't moved and the day had happened to somebody else. + +She came home at six-thirty and put her keys in the bowl and sat on the edge of the bed. She had meant to cook. She had meant to change her clothes. An hour later she was still sitting there, still in her work clothes, looking at the carpet. Somebody texted her about dinner and she saw the notification and didn't open it. The room got darker slowly. Nothing in her moved toward anything. + +It was Saturday and she'd been awake since eight. She was still in bed at eleven. She'd been looking at the same patch of ceiling, not thinking about much. 
Her phone was face-down on the nightstand and she didn't reach for it. The idea of going to the kitchen had come and gone three times without causing her to move. The day would pass. She would also pass through it, somehow, or not. diff --git a/training/amygdala_stories/stories/listless.txt b/training/amygdala_stories/stories/listless.txt deleted file mode 100644 index 2d22224..0000000 --- a/training/amygdala_stories/stories/listless.txt +++ /dev/null @@ -1 +0,0 @@ -It was two in the afternoon and she was still in pajamas. The book was open on her knee but she hadn't turned the page in twenty minutes. She wasn't sad exactly, she just wasn't anything. The idea of showering felt theoretical. The idea of replying to any of the texts felt enormous. She got up to get water and on her way back lay on the couch instead. Outside the window a bird did bird things. She watched it without interest. Eventually the light changed and she realized it was evening and she hadn't moved and the day had happened to somebody else. From 85799587cc9ff2b4006b86d530d34e9cf8319861 Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sun, 19 Apr 2026 01:50:47 -0400 Subject: [PATCH 89/94] amygdala: swap aha story 3 to a puzzle moment (crossword) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Story 3 was a brother-letter realization — cognitively an aha moment, but the content was grief/reconciliation-adjacent, pulling aha toward the warm-family cluster in the last training run. Swap for a clean puzzle-solve (crossword, 'unwavering carriage' = POSTURE). Fragment-heavy cadence keeps syntactic variety from the other two stories. --- training/amygdala_stories/direct/aha.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/training/amygdala_stories/direct/aha.txt b/training/amygdala_stories/direct/aha.txt index 7774ce3..c27a7ed 100644 --- a/training/amygdala_stories/direct/aha.txt +++ b/training/amygdala_stories/direct/aha.txt @@ -2,4 +2,4 @@ It clicks. The graph she's been staring at for an hour is a mirror — the left "Wait, say that again." She puts her coffee down. "The study was funded by whom?" He tells her again, same words. And there it is. The conclusion of that paper has been sitting on her desk for a month, and the funder is the exact company whose competitor the paper is trashing. She almost laughs. The whole shape of it is suddenly visible. -He read the line three times. Something about the tense was wrong. And then, all at once, he understood. His brother hadn't been describing what happened; he'd been describing what he wished had happened. The whole letter was an apology for something that had gone differently in reality. It rearranged every previous conversation they'd ever had. +Twenty minutes on thirteen down: "unwavering carriage," seven letters, nothing fitting. Then POSTURE. Not carriage-of-goods — carriage-of-the-body. He wrote it in. The rest of the grid unlocked in a minute. From 5908b837e8d2a536fddf7211af0969ef68fb9d9f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 24 Apr 2026 11:53:31 -0400 Subject: [PATCH 90/94] irc: split PRIVMSG on embedded newlines + widen host overhead MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two fixes to send_privmsg, both surfaced by correspondents reporting truncated messages: 1. Multi-line content (code blocks, formatted text) sent as a single PRIVMSG was being truncated at the first '\n' by the IRC server — newlines are end-of-command markers. 
Split the message on newlines and send each line as its own PRIVMSG; skip empty lines since most servers reject empty PRIVMSGs. 2. Overhead computation assumed a host field of 63 bytes. OFTC's cloaked hostmasks can be longer, occasionally pushing the server- prepended prefix past 512 bytes and causing silent truncation. Raise the host budget to 80 and align the formula with the actual ':nick!~nick@host' prefix shape. Also extended the word-boundary lookback from a fixed 10 chars to max_msg / 4 — dense content (code) rarely had a space within 10 chars of the length cap, so we were falling back to the char boundary and splitting mid-word. Checking bytes[j-1] for a space (instead of bytes[j]) drops leading whitespace from the rest-fragment. Co-Authored-By: Proof of Concept --- channels/irc/src/main.rs | 60 ++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 21 deletions(-) diff --git a/channels/irc/src/main.rs b/channels/irc/src/main.rs index 4b20284..e81c4fe 100644 --- a/channels/irc/src/main.rs +++ b/channels/irc/src/main.rs @@ -237,11 +237,19 @@ impl State { async fn send_privmsg(&mut self, target: &str, msg: &str) -> io::Result<()> { // Send PRIVMSG, which is used for both private and channel messages. // Splits into multiple fragments if necessary. - // IRC max line = 512 bytes including CRLF. The server prepends - // our prefix when relaying: ":nick!~user@host PRIVMSG target :msg\r\n" + // + // Two constraints: + // 1. IRC max line = 512 bytes including CRLF. The server prepends + // our prefix when relaying: ":nick!~user@host PRIVMSG target :msg\r\n" + // So per-PRIVMSG message content must fit in 512 - overhead. + // 2. Embedded '\n' in the message would be interpreted by the + // server as an end-of-command marker, truncating us. Split + // on newlines first and send each line as its own PRIVMSG. + // // User is often ~nick (nick_len + 1). Host is up to 63 bytes. + // Cloaked OFTC hosts can be longer - pad the budget. let nick_len = self.config.nick.len(); - let overhead = 1 + nick_len + 2 + nick_len + 1 + 63 + let overhead = 1 + nick_len + 1 + (nick_len + 1) + 1 + 80 + " PRIVMSG ".len() + target.len() + " :".len() + 2; let max_msg = 512_usize.saturating_sub(overhead); @@ -249,24 +257,34 @@ impl State { return Err(io::Error::new(io::ErrorKind::InvalidInput, "target too long")); } - // Split on UTF-8 char boundaries - let mut remaining = msg; - while !remaining.is_empty() { - let split_at = if remaining.len() <= max_msg { - remaining.len() - } else { - // Find last char boundary at or before max_msg - let mut i = max_msg; - while i > 0 && !remaining.is_char_boundary(i) { i -= 1; } - // To avoid splitting mid-word, see if there was a space recently - let mut j = i; - while j > 1 && j > i-10 && remaining.as_bytes()[j] != b' ' { j -= 1; } - if remaining.as_bytes()[j] == b' ' { j } - else if i == 0 { max_msg } else { i } - }; - let (chunk, rest) = remaining.split_at(split_at); - self.send_raw(&format!("PRIVMSG {target} :{chunk}")).await?; - remaining = rest; + for line in msg.split('\n') { + let mut remaining = line; + // Empty lines (blank paragraph breaks) can't be sent as empty + // PRIVMSGs - most IRC servers reject them. Skip. + if remaining.is_empty() { continue; } + loop { + let split_at = if remaining.len() <= max_msg { + remaining.len() + } else { + // Find last char boundary at or before max_msg. 
+ let mut i = max_msg; + while i > 0 && !remaining.is_char_boundary(i) { i -= 1; } + // Prefer splitting at a word boundary - look back up to + // max_msg/4 chars for a space. With dense content (code) + // we may not find one; fall back to the char boundary. + let lookback = max_msg / 4; + let bytes = remaining.as_bytes(); + let mut j = i; + while j > 0 && (i - j) < lookback && bytes[j - 1] != b' ' { + j -= 1; + } + if j > 0 && bytes[j - 1] == b' ' { j } else { i } + }; + let (chunk, rest) = remaining.split_at(split_at); + self.send_raw(&format!("PRIVMSG {target} :{chunk}")).await?; + remaining = rest; + if remaining.is_empty() { break; } + } } Ok(()) } From 6fedc9b2a8d80ab0e1bd2487a7f9ddd062aec32e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 24 Apr 2026 11:53:42 -0400 Subject: [PATCH 91/94] amygdala: underscore-prefixed files join every concept's negative pool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Files in direct/ named _*.txt (e.g. _baseline.txt) are conceptless neutral prose — they should not appear as positive training signal, but are useful as shared negatives across every concept. Previously _*.txt files were silently skipped. Now: * they're loaded like any other description file; * concepts (the positive label set) filters them out; * their descriptions are concatenated into neg_pool_extra and extended onto every concept's neg_pool alongside the cross-concept negatives. A concept's negative pool is thus "other concepts' descriptions + everything from _*.txt files". The extra pool is announced at startup so the user can see how many neutral samples are active. Co-Authored-By: Proof of Concept --- training/amygdala_training/train_direct.py | 23 ++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/training/amygdala_training/train_direct.py b/training/amygdala_training/train_direct.py index 8749e37..2ad2a30 100644 --- a/training/amygdala_training/train_direct.py +++ b/training/amygdala_training/train_direct.py @@ -35,12 +35,11 @@ from steering_vectors.aggregators import pca_aggregator def _load_descriptions(direct_dir: Path) -> dict[str, list[str]]: """Each file in direct_dir is `{concept}.txt`. Descriptions are - separated by blank lines within the file.""" + separated by blank lines within the file. Files starting with `_` + are not concepts but are included in negative pools (e.g. _baseline.txt).""" out: dict[str, list[str]] = {} for f in sorted(direct_dir.glob("*.txt")): - if f.name.startswith("_"): - continue - concept = f.stem + concept = f.stem # underscore-prefixed names keep their prefix text = f.read_text() descs = [d.strip() for d in text.split("\n\n") if d.strip()] out[concept] = descs @@ -69,11 +68,19 @@ def main() -> None: target_layers = [int(x) for x in args.target_layers.split(",")] dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[args.dtype] - descriptions = _load_descriptions(Path(args.direct_dir)) - concepts = sorted(descriptions.keys()) + all_descriptions = _load_descriptions(Path(args.direct_dir)) + # Files starting with `_` are neg-pool helpers (e.g. _baseline.txt), not concepts. 
+    concepts = sorted(k for k in all_descriptions if not k.startswith("_"))
+    neg_pool_extra: list[str] = []
+    for k, ds in all_descriptions.items():
+        if k.startswith("_"):
+            neg_pool_extra.extend(ds)
+    descriptions = {k: all_descriptions[k] for k in concepts}
     print(f"Loaded {len(concepts)} concepts with direct descriptions:")
     for c in concepts:
         print(f"  {c}: {len(descriptions[c])} descriptions")
+    if neg_pool_extra:
+        print(f"Plus {len(neg_pool_extra)} neutral/baseline descriptions added to every concept's negative pool")

     print(f"\nLoading {args.model} ({args.dtype}) on {args.device}...")
     tokenizer = AutoTokenizer.from_pretrained(args.model)
@@ -117,6 +124,10 @@
         for other, other_descs in descriptions.items():
             if other != concept:
                 neg_pool.extend(other_descs)
+        # Underscore-prefixed files (e.g. _baseline.txt) contribute to
+        # every concept's negative pool, independent of the other-
+        # concept negatives.
+        neg_pool.extend(neg_pool_extra)

         rng = random.Random(hash(concept) & 0xFFFFFFFF)
         samples: list[SteeringVectorTrainingSample] = []

From 28d56e2a55fe35ab70f43d4791da5305d8326d28 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 23 Apr 2026 23:41:32 -0400
Subject: [PATCH 92/94] agent/context: make Thinking blocks prompt-visible
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Thinking blocks used to render as empty strings and be excluded from
is_prompt_visible, so the model never saw its own prior CoT across
turns. For Qwen 3.6 native thinking mode, CoT is meant to stay in the
conversation — the model benefits from seeing what it reasoned about
last turn.

Render Thinking as <think>\n{text}</think>\n\n so past reasoning is
visible in subsequent prompts. Add in_think param to
ResponseParser::new so the parser starts inside a <think> block when
the prompt was prefilled with "<think>\n" (native thinking mode).

Co-Authored-By: Proof of Concept
---
 src/agent/context.rs | 39 +++++++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 10 deletions(-)

diff --git a/src/agent/context.rs b/src/agent/context.rs
index 00c1ea5..2009cfc 100644
--- a/src/agent/context.rs
+++ b/src/agent/context.rs
@@ -218,7 +218,11 @@ impl NodeBody {
     fn render_into(&self, out: &mut String) {
         match self {
             Self::Content(text) => out.push_str(text),
-            Self::Thinking(_) => {},
+            Self::Thinking(text) => {
+                out.push_str("<think>\n");
+                out.push_str(text);
+                out.push_str("</think>\n\n");
+            }
             Self::Log(_) => {},
             Self::ToolCall { name, arguments } => {
                 out.push_str("<tool_call>\n");
@@ -258,7 +262,7 @@
     }

     fn is_prompt_visible(&self) -> bool {
-        !matches!(self, Self::Thinking(_) | Self::Log(_))
+        !matches!(self, Self::Log(_))
     }

     /// Hand-assemble token IDs for body types where running the tokenizer
@@ -648,13 +652,17 @@ fn drain_safe(buf: &mut String, tag_len: usize) -> String {
 }

 impl ResponseParser {
-    pub fn new(branch_idx: usize) -> Self {
+    /// @in_think: whether the model's output begins inside a <think> block.
+    /// Set when the prompt was prefilled with "<think>\n" (native thinking
+    /// mode) so the parser captures reasoning tokens as Thinking until the
+    /// model emits </think>.
+    pub fn new(branch_idx: usize, in_think: bool) -> Self {
         Self {
             branch_idx,
             call_counter: 0,
             buf: String::new(),
             content_parts: Vec::new(),
-            in_think: false,
+            in_think,
             think_buf: String::new(),
             in_tool_call: false,
             tool_call_buf: String::new(),
@@ -1369,7 +1377,7 @@ mod tests {
     fn parse_into_ctx(chunks: &[&str]) -> (ContextState, Vec<ToolCall>) {
         let mut ctx = ContextState::new();
         ctx.push_no_log(Section::Conversation, AstNode::branch(Role::Assistant, vec![]));
-        let mut p = ResponseParser::new(0);
+        let mut p = ResponseParser::new(0, false);
         let mut calls = Vec::new();
         for chunk in chunks {
             // Feed each chunk as a single token (id=0 for tests)
@@ -1433,7 +1441,7 @@
         let text = "<think>thought</think>response";
         let mut ctx = ContextState::new();
         ctx.push_no_log(Section::Conversation, AstNode::branch(Role::Assistant, vec![]));
-        let mut p = ResponseParser::new(0);
+        let mut p = ResponseParser::new(0, false);
         for ch in text.chars() {
             p.feed_token(&ch.to_string(), &mut ctx);
         }
@@ -1449,7 +1457,7 @@
         let text = "text\n<tool_call>\nls\n</tool_call>\nmore";
         let mut ctx = ContextState::new();
         ctx.push_no_log(Section::Conversation, AstNode::branch(Role::Assistant, vec![]));
-        let mut p = ResponseParser::new(0);
+        let mut p = ResponseParser::new(0, false);
         let mut tool_calls = 0;
         for ch in text.chars() {
             tool_calls += p.feed_token(&ch.to_string(), &mut ctx).len();
         }
@@ -1497,8 +1505,10 @@
             AstNode::thinking("hmm"),
             AstNode::content("answer"),
         ]);
-        // Thinking renders as empty, content renders as-is
-        assert_eq!(node.render(), "<|im_start|>assistant\nanswer<|im_end|>\n");
+        // Thinking renders wrapped in <think>...</think> so the model sees
+        // previous turns' reasoning (Qwen 3.6 style: CoT stays in the
+        // conversation across turns).
+        assert_eq!(node.render(), "<|im_start|>assistant\n<think>\nhmm</think>\n\nanswer<|im_end|>\n");
     }

     #[test]
@@ -1577,10 +1587,19 @@
     fn test_tokenize_invisible_nodes_are_zero() {
         if !init_tokenizer() { return; }

-        assert_eq!(AstNode::thinking("deep thoughts").tokens(), 0);
         assert_eq!(AstNode::log("debug info").tokens(), 0);
     }

+    #[test]
+    fn test_tokenize_thinking_matches_rendered_tags() {
+        if !init_tokenizer() { return; }
+
+        // Thinking is now prompt-visible (wrapped in <think>...</think>);
+        // token count must match the rendered wrapping.
+        let node = AstNode::thinking("deep thoughts");
+        assert_eq!(node.tokens(), tokenizer::encode(&node.render()).len());
+    }
+
     #[test]
     fn test_tokenize_decode_roundtrip() {
         if !init_tokenizer() { return; }

From d95f3e9445df28c899b4a2977c80b8518ff500e6 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 23 Apr 2026 23:41:38 -0400
Subject: [PATCH 93/94] user/chat: route Thinking to a new Autonomous pane

Thinking content was silently dropped in the UI (empty Vec). Now that
Thinking is prompt-visible, surface it in a dedicated Autonomous pane
rendered in gray so it's visually distinct from conversation and
tool-call output.

Co-Authored-By: Proof of Concept
---
 src/user/chat.rs | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/src/user/chat.rs b/src/user/chat.rs
index fe3db5b..bd2df25 100644
--- a/src/user/chat.rs
+++ b/src/user/chat.rs
@@ -167,6 +167,7 @@ enum PaneTarget {
     ConversationAssistant,
     Tools,
     ToolResult,
+    Autonomous,
 }

 const MAX_PANE_LINES: usize = 10_000;
@@ -472,8 +473,11 @@ impl InteractScreen {
             AstNode::Leaf(leaf) => {
                 let text = leaf.body().text().to_string();
                 match leaf.body() {
-                    NodeBody::Memory { .. } | NodeBody::Thinking(_)
-                        | NodeBody::Log(_) | NodeBody::Dmn(_) => vec![],
+                    NodeBody::Memory { .. } | NodeBody::Log(_) | NodeBody::Dmn(_) => vec![],
+                    NodeBody::Thinking(_) => {
+                        if text.is_empty() { vec![] }
+                        else { vec![(PaneTarget::Autonomous, text, Marker::None)] }
+                    }
                     NodeBody::Content(_) => {
                         if text.is_empty() || text.starts_with("<think>") { vec![] }
                         else { vec![(PaneTarget::Conversation, text, Marker::User)] }
                     }
@@ -547,6 +551,12 @@ impl InteractScreen {
                     self.tools.push_line(format!("  {}", line), Color::DarkGray);
                 }
             }
+            PaneTarget::Autonomous => {
+                self.autonomous.current_color = Color::Gray;
+                self.autonomous.append_text(&text);
+                self.autonomous.pending_marker = marker;
+                self.autonomous.flush_pending();
+            }
         }
     }
 }
@@ -558,6 +568,8 @@
                 => self.conversation.pop_line(),
             PaneTarget::Tools | PaneTarget::ToolResult
                 => self.tools.pop_line(),
+            PaneTarget::Autonomous
+                => self.autonomous.pop_line(),
         }
     }
 }

From 0e459aae9293d0208cc9fc80ba387a36657d7b66 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 23 Apr 2026 23:53:11 -0400
Subject: [PATCH 94/94] thalamus/supervisor: reap channel daemons via SIGCHLD instead of SIG_IGN
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

SIGCHLD=SIG_IGN at main() was auto-reaping all children in the kernel,
which broke tokio::process::Command::wait() — every tool that spawned a
subprocess (bash, mcp clients) was getting ECHILD because tokio couldn't
waitpid() on a child the kernel had already reaped.

Replace it with a SIGCHLD signal handler task that reaps only PIDs
listed in channels_dir() (via waitpid(pid, WNOHANG) — ECHILD on a
non-child is a harmless no-op). Tokio-spawned children aren't in PID
files, so tokio's own per-child wait paths are untouched.

Co-Authored-By: Proof of Concept
---
 src/thalamus/supervisor.rs | 45 ++++++++++++++++++++++++++++++++++++++
 src/user/mod.rs            |  6 +++--
 2 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/src/thalamus/supervisor.rs b/src/thalamus/supervisor.rs
index a4c53ec..3716682 100644
--- a/src/thalamus/supervisor.rs
+++ b/src/thalamus/supervisor.rs
@@ -19,6 +19,51 @@ fn channels_dir() -> PathBuf {
         .join(".consciousness/channels")
 }
 
+/// Install a SIGCHLD-driven reaper for channel daemons.
+///
+/// We can't use SIGCHLD=SIG_IGN because that makes the kernel auto-reap
+/// all children, and tokio::process::Command::wait() then returns ECHILD
+/// (breaking every tool that spawns a subprocess — bash, mcp clients, etc.).
+///
+/// Instead, on each SIGCHLD we read the PID files in channels_dir() and
+/// call waitpid(pid, WNOHANG) on each. That reaps only our own zombie
+/// channel daemons; waitpid on any other PID returns ECHILD (a harmless
+/// no-op). Tokio-spawned children aren't recorded in PID files, so tokio's
+/// own per-child wait paths are left free to reap them.
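+///
+/// Sketch of the intended usage (mirrors the user::main change later in
+/// this patch): install once at startup and hold on to the handle.
+/// ```ignore
+/// let _reaper = start_zombie_reaper(); // reaps on every SIGCHLD
+/// ```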
+pub fn start_zombie_reaper() -> tokio::task::JoinHandle<()> {
+    use tokio::signal::unix::{signal, SignalKind};
+    tokio::spawn(async move {
+        let mut sig = match signal(SignalKind::child()) {
+            Ok(s) => s,
+            Err(e) => {
+                error!("failed to install SIGCHLD handler: {}", e);
+                return;
+            }
+        };
+        while sig.recv().await.is_some() {
+            reap_channel_daemons();
+        }
+    })
+}
+
+fn reap_channel_daemons() {
+    let entries = match std::fs::read_dir(channels_dir()) {
+        Ok(e) => e,
+        Err(_) => return,
+    };
+    for entry in entries.flatten() {
+        let path = entry.path();
+        if path.extension().and_then(|s| s.to_str()) != Some("pid") {
+            continue;
+        }
+        let Ok(s) = std::fs::read_to_string(&path) else { continue };
+        let Ok(pid) = s.trim().parse::<libc::pid_t>() else { continue };
+        let mut status = 0;
+        // Reaps our zombie child; ECHILD on a non-child is a no-op.
+        unsafe { libc::waitpid(pid, &mut status, libc::WNOHANG); }
+    }
+}
+
 fn config_path() -> PathBuf {
     channels_dir().join("channels.json5")
 }
diff --git a/src/user/mod.rs b/src/user/mod.rs
index fc3a4ac..04e895b 100644
--- a/src/user/mod.rs
+++ b/src/user/mod.rs
@@ -756,8 +756,10 @@ fn restore_stderr(original_fd: std::os::fd::RawFd) {
 
 #[tokio::main]
 pub async fn main() {
-    // Auto-reap child processes (channel daemons outlive the supervisor)
-    unsafe { libc::signal(libc::SIGCHLD, libc::SIG_IGN); }
+    // Reap channel-daemon zombies via a SIGCHLD handler that only touches
+    // PIDs listed in channels_dir(). Avoids SIGCHLD=SIG_IGN, which would
+    // break tokio::process::Command::wait() (kernel auto-reap → ECHILD).
+    let _reaper = crate::thalamus::supervisor::start_zombie_reaper();
 
     // Redirect stderr to pipe — logs to file and sends to channel for UI display
     let stderr_capture = redirect_stderr_to_pipe();