delete dead flat-file journal code from thought/context.rs
Journal entries are loaded from the memory graph store, not from the
flat journal file. Remove build_context_window, plan_context,
render_journal_text, assemble_context, truncate_at_section,
find_journal_cutoff, parse_journal*, ContextPlan, and stale TODOs.
Keep JournalEntry, default_journal_path (write path), and the live
context management functions. -363 lines.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-02 15:31:12 -04:00
|
|
|
// context.rs — Context window management
|
2026-03-27 15:22:48 -04:00
|
|
|
//
|
delete dead flat-file journal code from thought/context.rs
Journal entries are loaded from the memory graph store, not from the
flat journal file. Remove build_context_window, plan_context,
render_journal_text, assemble_context, truncate_at_section,
find_journal_cutoff, parse_journal*, ContextPlan, and stale TODOs.
Keep JournalEntry, default_journal_path (write path), and the live
context management functions. -363 lines.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-02 15:31:12 -04:00
|
|
|
// Token counting, conversation trimming, and error classification.
|
|
|
|
|
// Journal entries are loaded from the memory graph store, not from
|
|
|
|
|
// a flat file — the parse functions are gone.
|
2026-03-27 15:22:48 -04:00
|
|
|
|
Restrict API types visibility — types module is now private
Only Message, Role, MessageContent, ContentPart, ToolCall,
FunctionCall, Usage, ImageUrl are pub-exported from agent::api.
Internal types (ChatRequest, ChatCompletionChunk, ChunkChoice,
Delta, ReasoningConfig, ToolCallDelta, FunctionCallDelta) are
pub(crate) — invisible outside the crate.
All callers updated to import from agent::api:: instead of
agent::api::types::.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-07 13:39:20 -04:00
|
|
|
use crate::agent::api::*;
|
delete dead flat-file journal code from thought/context.rs
Journal entries are loaded from the memory graph store, not from the
flat journal file. Remove build_context_window, plan_context,
render_journal_text, assemble_context, truncate_at_section,
find_journal_cutoff, parse_journal*, ContextPlan, and stale TODOs.
Keep JournalEntry, default_journal_path (write path), and the live
context management functions. -363 lines.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-02 15:31:12 -04:00
|
|
|
use chrono::{DateTime, Utc};
|
2026-04-04 00:29:11 -04:00
|
|
|
use serde::{Deserialize, Serialize};
|
2026-03-27 15:22:48 -04:00
|
|
|
use tiktoken_rs::CoreBPE;
|
2026-04-04 17:00:01 -04:00
|
|
|
use crate::agent::tools::working_stack;
|
2026-04-02 15:25:07 -04:00
|
|
|
|
2026-04-07 20:15:31 -04:00
|
|
|
// --- Context state types ---
|
|
|
|
|
|
|
|
|
|
/// Conversation entry — either a regular message or memory content.
/// Memory entries preserve the original message for KV cache round-tripping.
#[derive(Debug, Clone, PartialEq)]
pub enum ConversationEntry {
    /// System prompt or system-level instruction.
    System(Message),
    /// Ordinary conversation message.
    Message(Message),
    /// Recalled memory: `key` identifies it in the store; `score` is the
    /// relevance used when evicting during trimming (None = unscored,
    /// treated as 0.0 by the eviction logic).
    Memory { key: String, message: Message, score: Option<f64> },
    /// DMN heartbeat/autonomous prompt — evicted aggressively during compaction.
    Dmn(Message),
    /// Debug/status log line — written to conversation log for tracing,
    /// skipped on read-back.
    Log(String),
}
|
|
|
|
|
|
|
|
|
|
/// Entry in the context window — wraps a ConversationEntry with cached metadata.
#[derive(Debug, Clone)]
pub struct ContextEntry {
    /// The wrapped conversation entry.
    pub entry: ConversationEntry,
    /// Cached token count (0 for Log entries).
    pub tokens: usize,
    /// When this entry was added to the context.
    pub timestamp: Option<DateTime<Utc>>,
}
|
|
|
|
|
|
2026-04-07 20:15:31 -04:00
|
|
|
/// A named section of the context window with cached token total.
#[derive(Debug, Clone)]
pub struct ContextSection {
    /// Section name, used for identification.
    pub name: String,
    /// Cached sum of entry tokens.
    tokens: usize,
    /// Entries in insertion order. Kept private so the token cache
    /// stays in sync — all mutation goes through the accessor methods.
    entries: Vec<ContextEntry>,
}
|
|
|
|
|
|
|
|
|
|
impl ContextSection {
|
|
|
|
|
pub fn new(name: impl Into<String>) -> Self {
|
|
|
|
|
Self { name: name.into(), tokens: 0, entries: Vec::new() }
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn entries(&self) -> &[ContextEntry] { &self.entries }
|
|
|
|
|
pub fn tokens(&self) -> usize { self.tokens }
|
|
|
|
|
pub fn len(&self) -> usize { self.entries.len() }
|
|
|
|
|
pub fn is_empty(&self) -> bool { self.entries.is_empty() }
|
|
|
|
|
|
|
|
|
|
/// Push an entry, updating the cached token total.
|
|
|
|
|
pub fn push(&mut self, entry: ContextEntry) {
|
|
|
|
|
self.tokens += entry.tokens;
|
|
|
|
|
self.entries.push(entry);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Replace an entry at `index`, adjusting the token total.
|
|
|
|
|
pub fn set(&mut self, index: usize, entry: ContextEntry) {
|
|
|
|
|
self.tokens -= self.entries[index].tokens;
|
|
|
|
|
self.tokens += entry.tokens;
|
|
|
|
|
self.entries[index] = entry;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Remove an entry at `index`, adjusting the token total.
|
|
|
|
|
pub fn del(&mut self, index: usize) -> ContextEntry {
|
|
|
|
|
let removed = self.entries.remove(index);
|
|
|
|
|
self.tokens -= removed.tokens;
|
|
|
|
|
removed
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Replace the message inside an entry, recomputing its token count.
|
|
|
|
|
pub fn set_message(&mut self, index: usize, tokenizer: &CoreBPE, msg: Message) {
|
|
|
|
|
let old_tokens = self.entries[index].tokens;
|
|
|
|
|
*self.entries[index].entry.message_mut() = msg;
|
|
|
|
|
let new_tokens = msg_token_count(tokenizer, self.entries[index].entry.api_message());
|
|
|
|
|
self.entries[index].tokens = new_tokens;
|
|
|
|
|
self.tokens = self.tokens - old_tokens + new_tokens;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Set the score on a Memory entry. No token change.
|
|
|
|
|
pub fn set_score(&mut self, index: usize, score: Option<f64>) {
|
|
|
|
|
if let ConversationEntry::Memory { score: s, .. } = &mut self.entries[index].entry {
|
|
|
|
|
*s = score;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Bulk replace all entries, recomputing token total.
|
|
|
|
|
pub fn set_entries(&mut self, entries: Vec<ContextEntry>) {
|
|
|
|
|
self.tokens = entries.iter().map(|e| e.tokens).sum();
|
|
|
|
|
self.entries = entries;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Dedup and trim entries to fit within context budget.
|
2026-04-07 20:55:35 -04:00
|
|
|
pub fn trim(&mut self, fixed_tokens: usize) {
|
|
|
|
|
let result = trim_entries(&self.entries, fixed_tokens);
|
2026-04-07 20:15:31 -04:00
|
|
|
self.entries = result;
|
|
|
|
|
self.tokens = self.entries.iter().map(|e| e.tokens).sum();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Clear all entries.
|
|
|
|
|
pub fn clear(&mut self) {
|
|
|
|
|
self.entries.clear();
|
|
|
|
|
self.tokens = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[derive(Clone)]
pub struct ContextState {
    /// System prompt section.
    pub system: ContextSection,
    /// Identity section — rendered into the context message.
    pub identity: ContextSection,
    /// Journal section — rendered as an "[Earlier — from your journal]" block.
    pub journal: ContextSection,
    /// Live conversation section.
    pub conversation: ContextSection,
    /// Working stack — separate from identity because it's managed
    /// by its own tool, not loaded from personality files.
    pub working_stack: Vec<String>,
}
|
|
|
|
|
|
|
|
|
|
impl ContextState {
|
|
|
|
|
/// Total tokens across all sections.
|
|
|
|
|
pub fn total_tokens(&self) -> usize {
|
|
|
|
|
self.system.tokens() + self.identity.tokens()
|
|
|
|
|
+ self.journal.tokens() + self.conversation.tokens()
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-07 20:55:35 -04:00
|
|
|
/// Budget status string for debug logging.
|
|
|
|
|
pub fn format_budget(&self) -> String {
|
|
|
|
|
let window = context_window();
|
|
|
|
|
if window == 0 { return String::new(); }
|
|
|
|
|
let used = self.total_tokens();
|
|
|
|
|
let free = window.saturating_sub(used);
|
|
|
|
|
let pct = |n: usize| if n == 0 { 0 } else { ((n * 100) / window).max(1) };
|
|
|
|
|
format!("sys:{}% id:{}% jnl:{}% conv:{}% free:{}%",
|
|
|
|
|
pct(self.system.tokens()), pct(self.identity.tokens()),
|
|
|
|
|
pct(self.journal.tokens()), pct(self.conversation.tokens()),
|
|
|
|
|
pct(free))
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-07 20:15:31 -04:00
|
|
|
/// All sections as a slice for iteration.
|
|
|
|
|
pub fn sections(&self) -> [&ContextSection; 4] {
|
|
|
|
|
[&self.system, &self.identity, &self.journal, &self.conversation]
|
|
|
|
|
}
|
2026-04-02 15:25:07 -04:00
|
|
|
}
|
|
|
|
|
|
2026-04-02 15:58:03 -04:00
|
|
|
/// Context window size in tokens, read from the `api_context_window`
/// config value. A value of 0 means "no window configured".
pub fn context_window() -> usize {
    crate::config::get().api_context_window
}
|
|
|
|
|
|
2026-04-02 22:53:54 -04:00
|
|
|
/// Context budget in tokens: 80% of the model's context window.
/// The remaining 20% is reserved for model output.
/// Integer division truncates; for a zero window this is 0.
fn context_budget_tokens() -> usize {
    context_window() * 80 / 100
}
|
|
|
|
|
|
2026-04-02 15:58:03 -04:00
|
|
|
/// Dedup and trim conversation entries to fit within the context budget.
///
/// Phase 1: Drop duplicate memories (keep last) and DMN entries.
/// Phase 2: While over budget, drop lowest-scored memory (or if memories
/// are under 50%, drop oldest conversation entry).
fn trim_entries(entries: &[ContextEntry], fixed_tokens: usize) -> Vec<ContextEntry> {
    let max_tokens = context_budget_tokens();

    // Phase 1: dedup memories by key (keep last), drop DMN entries.
    // seen_keys maps a memory key to the index of its latest occurrence;
    // when a key repeats, the earlier index goes into drop_indices.
    let mut seen_keys: std::collections::HashMap<&str, usize> = std::collections::HashMap::new();
    let mut drop_indices: std::collections::HashSet<usize> = std::collections::HashSet::new();

    for (i, ce) in entries.iter().enumerate() {
        if ce.entry.is_dmn() {
            drop_indices.insert(i);
        } else if let ConversationEntry::Memory { key, .. } = &ce.entry {
            // HashMap::insert returns the displaced value — the previous
            // index for this key — which is exactly what we want to drop.
            if let Some(prev) = seen_keys.insert(key.as_str(), i) {
                drop_indices.insert(prev);
            }
        }
    }

    let mut result: Vec<ContextEntry> = entries.iter().enumerate()
        .filter(|(i, _)| !drop_indices.contains(i))
        .map(|(_, e)| e.clone())
        .collect();

    // Token totals are recomputed from scratch on each use; O(n) per call,
    // so the eviction loop below is O(n^2) in the worst case. Acceptable
    // for context-window-sized entry counts.
    let entry_total = |r: &[ContextEntry]| -> usize { r.iter().map(|e| e.tokens).sum::<usize>() };
    let mem_total = |r: &[ContextEntry]| -> usize {
        r.iter().filter(|e| e.entry.is_memory()).map(|e| e.tokens).sum()
    };

    dbglog!("[trim] max={} fixed={} total={} entries={}",
        max_tokens, fixed_tokens, fixed_tokens + entry_total(&result), result.len());

    // Phase 2: while over budget, evict one entry per iteration.
    while fixed_tokens + entry_total(&result) > max_tokens {
        let mt = mem_total(&result);
        let ct = entry_total(&result) - mt;

        if mt > ct && let Some(i) = lowest_scored_memory(&result) {
            // If memories > 50% of entry tokens, drop lowest-scored memory
            result.remove(i);
        } else if let Some(i) = result.iter().position(|e| !e.entry.is_memory()) {
            // Otherwise drop oldest conversation entry
            result.remove(i);
        } else {
            // Nothing left to evict — give up rather than loop forever.
            break;
        }
    }

    // Snap to user message boundary at the start
    // NOTE(review): message() panics on Log entries — this assumes no Log
    // entry can appear at the front of `result`; confirm upstream filtering.
    while let Some(first) = result.first() {
        if first.entry.message().role == Role::User { break; }
        result.remove(0);
    }

    dbglog!("[trim] result={} total={}", result.len(), fixed_tokens + entry_total(&result));
    result
}
|
|
|
|
|
|
2026-04-07 20:55:35 -04:00
|
|
|
fn lowest_scored_memory(entries: &[ContextEntry]) -> Option<usize> {
|
|
|
|
|
entries.iter().enumerate()
|
|
|
|
|
.filter(|(_, e)| e.entry.is_memory())
|
|
|
|
|
.min_by(|(_, a), (_, b)| {
|
|
|
|
|
let sa = match &a.entry { ConversationEntry::Memory { score, .. } => score.unwrap_or(0.0), _ => 0.0 };
|
|
|
|
|
let sb = match &b.entry { ConversationEntry::Memory { score, .. } => score.unwrap_or(0.0), _ => 0.0 };
|
|
|
|
|
sa.partial_cmp(&sb).unwrap_or(std::cmp::Ordering::Equal)
|
|
|
|
|
})
|
|
|
|
|
.map(|(i, _)| i)
|
|
|
|
|
}
|
|
|
|
|
|
delete dead flat-file journal code from thought/context.rs
Journal entries are loaded from the memory graph store, not from the
flat journal file. Remove build_context_window, plan_context,
render_journal_text, assemble_context, truncate_at_section,
find_journal_cutoff, parse_journal*, ContextPlan, and stale TODOs.
Keep JournalEntry, default_journal_path (write path), and the live
context management functions. -363 lines.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-02 15:31:12 -04:00
|
|
|
/// Count the token footprint of a message using BPE tokenization.
|
|
|
|
|
pub fn msg_token_count(tokenizer: &CoreBPE, msg: &Message) -> usize {
|
|
|
|
|
let count = |s: &str| tokenizer.encode_with_special_tokens(s).len();
|
2026-03-27 15:22:48 -04:00
|
|
|
let content = msg.content.as_ref().map_or(0, |c| match c {
|
|
|
|
|
MessageContent::Text(s) => count(s),
|
delete dead flat-file journal code from thought/context.rs
Journal entries are loaded from the memory graph store, not from the
flat journal file. Remove build_context_window, plan_context,
render_journal_text, assemble_context, truncate_at_section,
find_journal_cutoff, parse_journal*, ContextPlan, and stale TODOs.
Keep JournalEntry, default_journal_path (write path), and the live
context management functions. -363 lines.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-02 15:31:12 -04:00
|
|
|
MessageContent::Parts(parts) => parts.iter()
|
2026-03-27 15:22:48 -04:00
|
|
|
.map(|p| match p {
|
|
|
|
|
ContentPart::Text { text } => count(text),
|
|
|
|
|
ContentPart::ImageUrl { .. } => 85,
|
|
|
|
|
})
|
|
|
|
|
.sum(),
|
|
|
|
|
});
|
|
|
|
|
let tools = msg.tool_calls.as_ref().map_or(0, |calls| {
|
delete dead flat-file journal code from thought/context.rs
Journal entries are loaded from the memory graph store, not from the
flat journal file. Remove build_context_window, plan_context,
render_journal_text, assemble_context, truncate_at_section,
find_journal_cutoff, parse_journal*, ContextPlan, and stale TODOs.
Keep JournalEntry, default_journal_path (write path), and the live
context management functions. -363 lines.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-02 15:31:12 -04:00
|
|
|
calls.iter()
|
2026-03-27 15:22:48 -04:00
|
|
|
.map(|c| count(&c.function.arguments) + count(&c.function.name))
|
|
|
|
|
.sum()
|
|
|
|
|
});
|
|
|
|
|
content + tools
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Detect context window overflow errors from the API.
|
|
|
|
|
pub fn is_context_overflow(err: &anyhow::Error) -> bool {
|
|
|
|
|
let msg = err.to_string().to_lowercase();
|
|
|
|
|
msg.contains("context length")
|
|
|
|
|
|| msg.contains("token limit")
|
|
|
|
|
|| msg.contains("too many tokens")
|
|
|
|
|
|| msg.contains("maximum context")
|
|
|
|
|
|| msg.contains("prompt is too long")
|
|
|
|
|
|| msg.contains("request too large")
|
|
|
|
|
|| msg.contains("input validation error")
|
|
|
|
|
|| msg.contains("content length limit")
|
|
|
|
|
|| (msg.contains("400") && msg.contains("tokens"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Detect model/provider errors delivered inside the SSE stream.
|
|
|
|
|
pub fn is_stream_error(err: &anyhow::Error) -> bool {
|
|
|
|
|
err.to_string().contains("model stream error")
|
|
|
|
|
}
|
2026-04-04 00:29:11 -04:00
|
|
|
|
|
|
|
|
// Custom serde: serialize Memory with a "memory_key" field added to the message,
|
|
|
|
|
// plain messages serialize as-is. This keeps the conversation log readable.
|
|
|
|
|
impl Serialize for ConversationEntry {
|
|
|
|
|
fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
|
|
|
|
|
use serde::ser::SerializeMap;
|
|
|
|
|
match self {
|
2026-04-07 20:15:31 -04:00
|
|
|
Self::System(m) | Self::Message(m) | Self::Dmn(m) => m.serialize(s),
|
2026-04-07 03:14:24 -04:00
|
|
|
Self::Memory { key, message, score } => {
|
2026-04-04 00:29:11 -04:00
|
|
|
let json = serde_json::to_value(message).map_err(serde::ser::Error::custom)?;
|
|
|
|
|
let mut map = s.serialize_map(None)?;
|
|
|
|
|
if let serde_json::Value::Object(obj) = json {
|
|
|
|
|
for (k, v) in obj {
|
|
|
|
|
map.serialize_entry(&k, &v)?;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
map.serialize_entry("memory_key", key)?;
|
2026-04-07 03:14:24 -04:00
|
|
|
if let Some(s) = score {
|
|
|
|
|
map.serialize_entry("memory_score", s)?;
|
|
|
|
|
}
|
2026-04-04 00:29:11 -04:00
|
|
|
map.end()
|
|
|
|
|
}
|
2026-04-07 01:23:22 -04:00
|
|
|
Self::Log(text) => {
|
|
|
|
|
use serde::ser::SerializeMap;
|
|
|
|
|
let mut map = s.serialize_map(Some(1))?;
|
|
|
|
|
map.serialize_entry("log", text)?;
|
|
|
|
|
map.end()
|
|
|
|
|
}
|
2026-04-04 00:29:11 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl<'de> Deserialize<'de> for ConversationEntry {
|
|
|
|
|
fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
|
|
|
|
|
let mut json: serde_json::Value = serde_json::Value::deserialize(d)?;
|
2026-04-07 01:23:22 -04:00
|
|
|
// Log entries — skip on read-back
|
|
|
|
|
if json.get("log").is_some() {
|
|
|
|
|
let text = json["log"].as_str().unwrap_or("").to_string();
|
|
|
|
|
return Ok(Self::Log(text));
|
|
|
|
|
}
|
2026-04-04 00:29:11 -04:00
|
|
|
if let Some(key) = json.as_object_mut().and_then(|o| o.remove("memory_key")) {
|
|
|
|
|
let key = key.as_str().unwrap_or("").to_string();
|
2026-04-07 03:14:24 -04:00
|
|
|
let score = json.as_object_mut()
|
|
|
|
|
.and_then(|o| o.remove("memory_score"))
|
|
|
|
|
.and_then(|v| v.as_f64());
|
2026-04-04 00:29:11 -04:00
|
|
|
let message: Message = serde_json::from_value(json).map_err(serde::de::Error::custom)?;
|
2026-04-07 03:14:24 -04:00
|
|
|
Ok(Self::Memory { key, message, score })
|
2026-04-04 00:29:11 -04:00
|
|
|
} else {
|
|
|
|
|
let message: Message = serde_json::from_value(json).map_err(serde::de::Error::custom)?;
|
|
|
|
|
Ok(Self::Message(message))
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl ConversationEntry {
|
|
|
|
|
/// Get the API message for sending to the model.
|
2026-04-07 01:23:22 -04:00
|
|
|
/// Panics on Log entries (which should be filtered before API calls).
|
2026-04-04 00:29:11 -04:00
|
|
|
pub fn api_message(&self) -> &Message {
|
|
|
|
|
match self {
|
2026-04-07 20:15:31 -04:00
|
|
|
Self::System(m) | Self::Message(m) | Self::Dmn(m) => m,
|
2026-04-04 00:29:11 -04:00
|
|
|
Self::Memory { message, .. } => message,
|
2026-04-07 01:23:22 -04:00
|
|
|
Self::Log(_) => panic!("Log entries have no API message"),
|
2026-04-04 00:29:11 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn is_memory(&self) -> bool {
|
|
|
|
|
matches!(self, Self::Memory { .. })
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-06 21:48:12 -04:00
|
|
|
pub fn is_dmn(&self) -> bool {
|
|
|
|
|
matches!(self, Self::Dmn(_))
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-07 01:23:22 -04:00
|
|
|
pub fn is_log(&self) -> bool {
|
|
|
|
|
matches!(self, Self::Log(_))
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-04 00:29:11 -04:00
|
|
|
/// Get a reference to the inner message.
|
2026-04-07 01:23:22 -04:00
|
|
|
/// Panics on Log entries.
|
2026-04-04 00:29:11 -04:00
|
|
|
pub fn message(&self) -> &Message {
|
|
|
|
|
match self {
|
2026-04-07 20:15:31 -04:00
|
|
|
Self::System(m) | Self::Message(m) | Self::Dmn(m) => m,
|
2026-04-04 00:29:11 -04:00
|
|
|
Self::Memory { message, .. } => message,
|
2026-04-07 01:23:22 -04:00
|
|
|
Self::Log(_) => panic!("Log entries have no message"),
|
2026-04-04 00:29:11 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Get a mutable reference to the inner message.
|
2026-04-07 01:23:22 -04:00
|
|
|
/// Panics on Log entries.
|
2026-04-04 00:29:11 -04:00
|
|
|
pub fn message_mut(&mut self) -> &mut Message {
|
|
|
|
|
match self {
|
2026-04-07 20:15:31 -04:00
|
|
|
Self::System(m) | Self::Message(m) | Self::Dmn(m) => m,
|
2026-04-04 00:29:11 -04:00
|
|
|
Self::Memory { message, .. } => message,
|
2026-04-07 01:23:22 -04:00
|
|
|
Self::Log(_) => panic!("Log entries have no message"),
|
2026-04-04 00:29:11 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-07 20:15:31 -04:00
|
|
|
impl ContextState {
|
|
|
|
|
/// Render journal entries into a single text block.
|
|
|
|
|
pub fn render_journal(&self) -> String {
|
|
|
|
|
if self.journal.is_empty() { return String::new(); }
|
|
|
|
|
let mut text = String::from("[Earlier — from your journal]\n\n");
|
|
|
|
|
for e in self.journal.entries() {
|
|
|
|
|
use std::fmt::Write;
|
|
|
|
|
if let Some(ts) = &e.timestamp {
|
|
|
|
|
writeln!(text, "## {}\n{}\n",
|
|
|
|
|
ts.format("%Y-%m-%dT%H:%M"),
|
|
|
|
|
e.entry.message().content_text()).ok();
|
|
|
|
|
} else {
|
|
|
|
|
text.push_str(&e.entry.message().content_text());
|
|
|
|
|
text.push_str("\n\n");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
text
|
2026-04-06 21:48:12 -04:00
|
|
|
}
|
|
|
|
|
|
2026-04-07 20:15:31 -04:00
|
|
|
/// Render identity files + working stack into a single user message.
|
2026-04-04 00:29:11 -04:00
|
|
|
pub fn render_context_message(&self) -> String {
|
2026-04-07 20:15:31 -04:00
|
|
|
let mut parts: Vec<String> = self.identity.entries().iter()
|
|
|
|
|
.map(|e| e.entry.message().content_text().to_string())
|
2026-04-04 00:29:11 -04:00
|
|
|
.collect();
|
2026-04-04 17:00:01 -04:00
|
|
|
let instructions = std::fs::read_to_string(working_stack::instructions_path()).unwrap_or_default();
|
2026-04-04 00:29:11 -04:00
|
|
|
let mut stack_section = instructions;
|
|
|
|
|
if self.working_stack.is_empty() {
|
|
|
|
|
stack_section.push_str("\n## Current stack\n\n(empty)\n");
|
|
|
|
|
} else {
|
|
|
|
|
stack_section.push_str("\n## Current stack\n\n");
|
|
|
|
|
for (i, item) in self.working_stack.iter().enumerate() {
|
|
|
|
|
if i == self.working_stack.len() - 1 {
|
|
|
|
|
stack_section.push_str(&format!("→ {}\n", item));
|
|
|
|
|
} else {
|
|
|
|
|
stack_section.push_str(&format!(" [{}] {}\n", i, item));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
parts.push(stack_section);
|
|
|
|
|
parts.join("\n\n---\n\n")
|
|
|
|
|
}
|
|
|
|
|
}
|