training: per-node scoring with graph weight updates

Memory scoring now uses the graph as source of truth:
- last_scored timestamp on each node (new capnp field @22)
- Nodes scored when older than scoring_interval_secs (default 1hr)
- Oldest-scored-first ordering
- Window: scoring_response_window assistant responses (default 100)
- First-quarter memories scored even without full window
- Per-response normalization (raw divergence / response count)
- Asymmetric weight update: alpha=0.5 up, alpha=0.1 down
  (responds fast to importance, decays slowly — memories stay
  surfaced even if only useful 1/4 of the time)

Graph weight writes are disabled for now, pending calibration of the per-response normalization.

Also: configurable scoring_interval_secs and scoring_response_window.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
ProofOfConcept 2026-04-04 05:01:49 -04:00 committed by Kent Overstreet
parent b0603fd1ef
commit fcd77fb79e
8 changed files with 109 additions and 64 deletions

View file

@ -56,6 +56,8 @@ fn default_true() -> bool { true }
/// Serde default for the model context window size, in tokens.
fn default_context_window() -> usize {
    128_000
}
/// Serde default for the API stream timeout, in seconds.
fn default_stream_timeout() -> u64 {
    60
}
/// Serde default for the max token count per memory-scoring logprobs chunk.
fn default_scoring_chunk_tokens() -> usize {
    50_000
}
/// Serde default for the memory re-scoring interval: one hour, in seconds.
fn default_scoring_interval_secs() -> u64 {
    60 * 60
}
/// Serde default for how many assistant responses are scored per memory.
fn default_scoring_response_window() -> usize {
    100
}
/// Serde default for the identity directory: `~/.consciousness/identity`.
/// Falls back to a relative path when the home directory cannot be resolved.
fn default_identity_dir() -> PathBuf {
    let home = dirs::home_dir().unwrap_or_default();
    home.join(".consciousness/identity")
}
@ -97,6 +99,12 @@ pub struct Config {
/// Max tokens per chunk for memory scoring logprobs calls.
#[serde(default = "default_scoring_chunk_tokens")]
pub scoring_chunk_tokens: usize,
/// How often to re-score memory nodes (seconds). Default: 3600 (1 hour).
#[serde(default = "default_scoring_interval_secs")]
pub scoring_interval_secs: u64,
/// Number of assistant responses to score per memory. Default: 100.
#[serde(default = "default_scoring_response_window")]
pub scoring_response_window: usize,
pub api_reasoning: String,
pub agent_types: Vec<String>,
/// Surface agent timeout in seconds.
@ -145,6 +153,8 @@ impl Default for Config {
api_context_window: default_context_window(),
api_stream_timeout_secs: default_stream_timeout(),
scoring_chunk_tokens: default_scoring_chunk_tokens(),
scoring_interval_secs: default_scoring_interval_secs(),
scoring_response_window: default_scoring_response_window(),
agent_model: None,
api_reasoning: "high".to_string(),
agent_types: vec![