From 46b4f6f434183e7663740b6029b4921c6f3e4ccd Mon Sep 17 00:00:00 2001 From: ProofOfConcept Date: Sat, 14 Mar 2026 20:05:53 -0400 Subject: [PATCH] scoring: configurable agent_budget, squared Elo distribution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit agent_budget config (default 1000) replaces health-metric-computed totals. The budget is the total agent runs per cycle — use it all. Elo distribution is squared for power-law unfairness: top-rated agents get disproportionately more runs. If linker has Elo 1123 and connector has 876, the shifted weights are (1123−876+50)² = 297² vs 50², so linker gets ~35x more runs, versus ~3.5x under the previous linear +100-shift scheme. Minimum 2 runs per type so underperformers still get evaluated. No Elo file → equal distribution as fallback. Co-Authored-By: Kent Overstreet --- poc-memory/src/config.rs | 6 +++ poc-memory/src/neuro/scoring.rs | 77 +++++++++++++++++++-------------- 2 files changed, 50 insertions(+), 33 deletions(-) diff --git a/poc-memory/src/config.rs b/poc-memory/src/config.rs index 78cc446..13258e4 100644 --- a/poc-memory/src/config.rs +++ b/poc-memory/src/config.rs @@ -51,6 +51,8 @@ pub struct Config { pub context_groups: Vec, /// Max concurrent LLM calls in the daemon. pub llm_concurrency: usize, + /// Total agent runs per consolidation cycle. + pub agent_budget: usize, /// Directory containing prompt templates for agents. pub prompts_dir: PathBuf, /// Separate Claude config dir for background agent work (daemon jobs). 
@@ -83,6 +85,7 @@ impl Default for Config { }, ], llm_concurrency: 1, + agent_budget: 1000, prompts_dir: home.join("poc/memory/prompts"), agent_config_dir: None, } @@ -141,6 +144,9 @@ impl Config { if let Some(n) = cfg.get("llm_concurrency").and_then(|v| v.as_u64()) { config.llm_concurrency = n.max(1) as usize; } + if let Some(n) = cfg.get("agent_budget").and_then(|v| v.as_u64()) { + config.agent_budget = n as usize; + } if let Some(s) = cfg.get("prompts_dir").and_then(|v| v.as_str()) { config.prompts_dir = expand_home(s); } diff --git a/poc-memory/src/neuro/scoring.rs b/poc-memory/src/neuro/scoring.rs index 2307a1b..44b6fa1 100644 --- a/poc-memory/src/neuro/scoring.rs +++ b/poc-memory/src/neuro/scoring.rs @@ -358,47 +358,58 @@ fn consolidation_plan_inner(store: &Store, detect_interf: bool) -> Consolidation nodes_per_community)); } - // Rebalance using Elo ratings if available + // Distribute agent budget using Elo ratings + let budget = crate::config::get().agent_budget; let elo_path = crate::config::get().data_dir.join("agent-elo.json"); if let Ok(elo_json) = std::fs::read_to_string(&elo_path) { if let Ok(ratings) = serde_json::from_str::>(&elo_json) { - let total_budget = plan.replay_count + plan.linker_count - + plan.separator_count + plan.transfer_count - + plan.organize_count + plan.connector_count; + let types = [ + "replay", "linker", "separator", "transfer", + "organize", "connector", + ]; + let elos: Vec = types.iter() + .map(|t| ratings.get(*t).copied().unwrap_or(1000.0)) + .collect(); + let min_elo = elos.iter().copied().fold(f64::MAX, f64::min); - if total_budget > 0 { - // Convert Elo to weights: subtract min, add 1 to avoid zero - let types = [ - "replay", "linker", "separator", "transfer", - "organize", "connector", - ]; - let elos: Vec = types.iter() - .map(|t| ratings.get(*t).copied().unwrap_or(1000.0)) - .collect(); - let min_elo = elos.iter().copied().fold(f64::MAX, f64::min); - let weights: Vec = elos.iter() - .map(|e| (e - min_elo + 100.0)) // 
shift so lowest gets 100 - .collect(); - let total_weight: f64 = weights.iter().sum(); + // Square the shifted ratings for unfair distribution — + // top agents get disproportionately more runs + let weights: Vec = elos.iter() + .map(|e| { + let shifted = e - min_elo + 50.0; // lowest gets 50 + shifted * shifted // square for power-law distribution + }) + .collect(); + let total_weight: f64 = weights.iter().sum(); - let allocate = |w: f64| -> usize { - ((w / total_weight * total_budget as f64).round() as usize).max(1) - }; + let allocate = |w: f64| -> usize { + ((w / total_weight * budget as f64).round() as usize).max(2) + }; - plan.replay_count = allocate(weights[0]); - plan.linker_count = allocate(weights[1]); - plan.separator_count = allocate(weights[2]); - plan.transfer_count = allocate(weights[3]); - plan.organize_count = allocate(weights[4]); - plan.connector_count = allocate(weights[5]); + plan.replay_count = allocate(weights[0]); + plan.linker_count = allocate(weights[1]); + plan.separator_count = allocate(weights[2]); + plan.transfer_count = allocate(weights[3]); + plan.organize_count = allocate(weights[4]); + plan.connector_count = allocate(weights[5]); - plan.rationale.push(format!( - "Elo rebalance (budget={}): replay={} linker={} separator={} transfer={} organize={} connector={}", - total_budget, - plan.replay_count, plan.linker_count, plan.separator_count, - plan.transfer_count, plan.organize_count, plan.connector_count)); - } + plan.rationale.push(format!( + "Elo allocation (budget={}): replay={} linker={} separator={} transfer={} organize={} connector={}", + budget, + plan.replay_count, plan.linker_count, plan.separator_count, + plan.transfer_count, plan.organize_count, plan.connector_count)); } + } else { + // No Elo file — use budget with equal distribution + let per_type = budget / 6; + plan.replay_count = per_type; + plan.linker_count = per_type; + plan.separator_count = per_type; + plan.transfer_count = per_type; + plan.organize_count = 
per_type; + plan.connector_count = per_type; + plan.rationale.push(format!( + "No Elo ratings — equal distribution ({} each, budget={})", per_type, budget)); } plan