config: global writable AppConfig; learn settings live there

Runtime-mutable settings (F6's threshold knob, the generate-alternates
toggle, anything else that comes along) were ending up as mirrored
fields on MindState — each new config setting grew MindState::new's
signature and added a clone+sync path. Wrong home. MindState is
ephemeral session state, not a config projection.

Give AppConfig the same treatment the memory Config has: install it
into a global RwLock<AppConfig> at startup via load_app, read through
config::app() (returns a read guard), mutate through update_app. The
config_writer functions now write to disk and then update the in-memory
cache, so the one-stop-shop call keeps both in sync.

Also while in here:

- learn.generate_alternates moves from a sentinel file
  (~/.consciousness/cache/finetune-alternates, "exists = enabled")
  into the config under the learn section. On first run with this
  build, if the sentinel file still exists Mind::new flips the
  config value to true and removes it. Drops
  alternates_enabled()/set_alternates().

- Default threshold 0.0000001 → 1.0. With the timestamp filter
  removed the previous value was letting essentially everything
  through; 1.0 is a sane "nothing gets through unless you actually
  want it" default.

- score_finetune_candidates takes generate_alternates as a parameter
  instead of reading a global — caller snapshots the config values
  once at the top of start_finetune_scoring so the async task
  doesn't need to hold the config read lock across awaits.

- MindState.learn_threshold / learn_generate_alternates gone; the
  SetLearn* command handlers now just delegate to config_writer.

Kent noted RwLock<Arc<AppConfig>> (the pattern used by the memory
Config global) is pointless here — nobody needs a snapshot-after-
release, reads are short — so this uses a plain RwLock<AppConfig>
and returns a read guard.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-16 12:53:22 -04:00
parent 343e43afab
commit 313f85f34a
5 changed files with 102 additions and 58 deletions

View file

@ -331,13 +331,21 @@ pub struct LearnConfig {
/// fine-tuning candidates. Lower = more sensitive.
#[serde(default = "default_learn_threshold")]
pub threshold: f64,
/// Whether to generate "what would the model have said without
/// memories" alternates alongside each scoring run. Expensive —
/// one full streaming generation per candidate.
#[serde(default)]
pub generate_alternates: bool,
}
fn default_learn_threshold() -> f64 { 0.0000001 }
fn default_learn_threshold() -> f64 { 1.0 }
impl Default for LearnConfig {
fn default() -> Self {
Self { threshold: default_learn_threshold() }
Self {
threshold: default_learn_threshold(),
generate_alternates: false,
}
}
}
@ -573,12 +581,46 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment {
}
/// Load just the AppConfig — no validation, no prompt assembly.
/// As a side effect, installs the loaded config into the global cache,
/// making `config::app()` usable from anywhere afterwards.
pub fn load_app(cli: &crate::user::CliArgs) -> Result<(AppConfig, Figment)> {
    let figment = build_figment(cli);
    let app = figment
        .extract::<AppConfig>()
        .context("Failed to load configuration")?;
    install_app(app.clone());
    Ok((app, figment))
}
// ============================================================
// Global AppConfig cache (writable, for runtime-mutable settings
// like learn.threshold that F6 edits via config_writer).
// ============================================================
static APP_CONFIG: OnceLock<RwLock<AppConfig>> = OnceLock::new();
/// Install `app` into the global cache, creating the slot on first call
/// and overwriting the cached value on subsequent calls.
///
/// Uses an `Option` shuttle so the first call moves `app` into the slot
/// without cloning; the original version cloned the whole `AppConfig`
/// and then immediately overwrote the freshly-initialized value.
fn install_app(app: AppConfig) {
    let mut app = Some(app);
    // First caller wins the init; the closure consumes `app`.
    let slot = APP_CONFIG.get_or_init(|| RwLock::new(app.take().unwrap()));
    // If the slot already existed (or another thread won the race),
    // `app` is still Some — write it through the lock.
    if let Some(app) = app.take() {
        *slot.write().unwrap() = app;
    }
}
/// Read access to the current AppConfig, held under a read lock.
/// Keep the returned guard short-lived — never hold it across `.await`
/// or long-running work, or writers get starved.
///
/// # Panics
/// Panics if called before `load_app` has installed the config;
/// `load_app` runs once at startup, so this signals a startup-order bug.
pub fn app() -> std::sync::RwLockReadGuard<'static, AppConfig> {
    let slot = APP_CONFIG
        .get()
        .expect("config::app() called before load_app()");
    slot.read().unwrap()
}
/// Apply `f` to the cached AppConfig under the write lock. config_writer
/// calls this after each surgical edit to ~/.consciousness/config.json5
/// so the in-memory view tracks what just landed on disk.
pub fn update_app(f: impl FnOnce(&mut AppConfig)) {
    let slot = APP_CONFIG.get().expect("update_app before load_app");
    let mut guard = slot.write().unwrap();
    f(&mut *guard);
}
/// Load the full config: figment → AppConfig → resolve backend → assemble prompts.
pub async fn load_session(cli: &crate::user::CliArgs) -> Result<(SessionConfig, Figment)> {
let (app, figment) = load_app(cli)?;

View file

@ -140,7 +140,17 @@ fn parse_scalar_literal(literal: &str) -> Result<JSONValue> {
/// Convenience: set `learn.threshold` to the given f64.
pub fn set_learn_threshold(value: f64) -> Result<()> {
// {:e} gives the minimal scientific notation that preserves the value.
set_scalar("learn", "threshold", &format!("{:e}", value))
set_scalar("learn", "threshold", &format!("{:e}", value))?;
crate::config::update_app(|app| app.learn.threshold = value);
Ok(())
}
/// Convenience: set `learn.generate_alternates` to the given bool,
/// persisting it to disk and mirroring it into the cached AppConfig.
pub fn set_learn_generate_alternates(value: bool) -> Result<()> {
    let literal = if value { "true" } else { "false" };
    set_scalar("learn", "generate_alternates", literal)?;
    crate::config::update_app(|app| app.learn.generate_alternates = value);
    Ok(())
}
#[cfg(test)]

View file

@ -151,9 +151,6 @@ pub struct MindState {
pub finetune_candidates: Vec<learn::FinetuneCandidate>,
/// Last scoring run stats for UI display.
pub finetune_last_run: Option<FinetuneScoringStats>,
/// Divergence threshold for finetune scoring — mutable via F6 hotkeys
/// and persisted back to ~/.consciousness/config.json5.
pub learn_threshold: f64,
}
/// Stats from the last finetune scoring run.
@ -189,7 +186,6 @@ impl Clone for MindState {
unc_idle_deadline: self.unc_idle_deadline,
finetune_candidates: self.finetune_candidates.clone(),
finetune_last_run: self.finetune_last_run.clone(),
learn_threshold: self.learn_threshold,
}
}
}
@ -206,6 +202,8 @@ pub enum MindCommand {
ScoreFinetune,
/// Update the finetune divergence threshold and persist to config.
SetLearnThreshold(f64),
/// Toggle alternate-response generation during scoring; persist to config.
SetLearnGenerateAlternates(bool),
/// Abort current turn, kill processes
Interrupt,
/// Reset session
@ -215,7 +213,7 @@ pub enum MindCommand {
}
impl MindState {
pub fn new(max_dmn_turns: u32, learn_threshold: f64) -> Self {
pub fn new(max_dmn_turns: u32) -> Self {
Self {
input: Vec::new(),
turn_active: false,
@ -233,7 +231,6 @@ impl MindState {
unc_idle_deadline: Instant::now() + std::time::Duration::from_secs(60),
finetune_candidates: Vec::new(),
finetune_last_run: None,
learn_threshold,
}
}
@ -363,9 +360,20 @@ impl Mind {
crate::agent::tools::tools(),
).await;
// Migrate legacy "file exists = enabled" sentinel for the
// generate-alternates flag into the config. One-shot; after this
// the sentinel is gone and the config is the source of truth.
let legacy_sentinel = dirs::home_dir().unwrap_or_default()
.join(".consciousness/cache/finetune-alternates");
if legacy_sentinel.exists() {
if !crate::config::app().learn.generate_alternates {
let _ = crate::config_writer::set_learn_generate_alternates(true);
}
let _ = std::fs::remove_file(&legacy_sentinel);
}
let shared = Arc::new(std::sync::Mutex::new(MindState::new(
config.app.dmn.max_turns,
config.app.learn.threshold,
)));
let (turn_watch, _) = tokio::sync::watch::channel(false);
let (conscious_active, _) = tokio::sync::watch::channel(false);
@ -569,11 +577,16 @@ impl Mind {
self.start_finetune_scoring();
}
MindCommand::SetLearnThreshold(value) => {
self.shared.lock().unwrap().learn_threshold = value;
if let Err(e) = crate::config_writer::set_learn_threshold(value) {
dbglog!("[learn] failed to persist threshold {}: {:#}", value, e);
}
}
MindCommand::SetLearnGenerateAlternates(value) => {
if let Err(e) = crate::config_writer::set_learn_generate_alternates(value) {
dbglog!("[learn] failed to persist generate_alternates {}: {:#}",
value, e);
}
}
}
}
}
@ -656,12 +669,14 @@ impl Mind {
/// once this runs continuously, we'll just train whatever lands at full
/// context without filtering.
pub fn start_finetune_scoring(&self) {
let threshold = {
let mut s = self.shared.lock().unwrap();
// Clear the previous run's candidates so this run's stream in fresh.
s.finetune_candidates.clear();
s.learn_threshold
// Snapshot the config values we need before spawning — the scoring
// task shouldn't hold the config read lock across async work.
let (threshold, gen_alternates) = {
let app = crate::config::app();
(app.learn.threshold, app.learn.generate_alternates)
};
// Clear the previous run's candidates so this run's stream is fresh.
self.shared.lock().unwrap().finetune_candidates.clear();
let agent = self.agent.clone();
let bg_tx = self.bg_tx.clone();
@ -685,7 +700,8 @@ impl Mind {
let bg_tx_cb = bg_tx.clone();
let stats = match learn::score_finetune_candidates(
&context, score_count, &client, threshold, &activity,
&context, score_count, &client, threshold,
gen_alternates, &activity,
|c| { let _ = bg_tx_cb.send(BgEvent::FinetuneCandidate(c)); },
).await {
Ok((above_threshold, max_div)) => {

View file

@ -504,6 +504,7 @@ pub async fn score_finetune_candidates(
count: usize,
client: &ApiClient,
min_divergence: f64,
generate_alternates: bool,
activity: &crate::agent::ActivityGuard,
mut on_candidate: impl FnMut(FinetuneCandidate),
) -> anyhow::Result<(usize, f64)> {
@ -558,7 +559,7 @@ pub async fn score_finetune_candidates(
}
let total = candidates.len();
let gen_alternates = alternates_enabled() && total > 0;
let gen_alternates = generate_alternates && total > 0;
for (i, mut candidate) in candidates.into_iter().enumerate() {
if gen_alternates {
@ -616,35 +617,12 @@ async fn generate_alternate(
use std::path::PathBuf;
use std::collections::HashSet;
const FINETUNE_ALTERNATES_FILE: &str = ".consciousness/cache/finetune-alternates";
const TRAINED_RESPONSES_FILE: &str = ".consciousness/cache/trained-responses.json";
fn alternates_path() -> PathBuf {
dirs::home_dir().unwrap_or_default().join(FINETUNE_ALTERNATES_FILE)
}
// Absolute path of the trained-responses cache, rooted at $HOME
// (empty path root if the home directory can't be resolved).
fn trained_path() -> PathBuf {
    let home = dirs::home_dir().unwrap_or_default();
    home.join(TRAINED_RESPONSES_FILE)
}
/// Check if alternate response generation is enabled.
pub fn alternates_enabled() -> bool {
alternates_path().exists()
}
/// Toggle alternate response generation and persist the setting.
pub fn set_alternates(enabled: bool) {
let path = alternates_path();
if enabled {
if let Some(parent) = path.parent() {
let _ = std::fs::create_dir_all(parent);
}
let _ = std::fs::write(&path, "");
} else {
let _ = std::fs::remove_file(&path);
}
}
/// Load set of trained response timestamps (nanos since epoch).
pub fn load_trained() -> HashSet<i64> {
let path = trained_path();

View file

@ -109,28 +109,24 @@ impl ScreenView for LearnScreen {
}
}
KeyCode::Char('g') => {
// Toggle alternate generation and persist
let current = crate::subconscious::learn::alternates_enabled();
crate::subconscious::learn::set_alternates(!current);
let current = crate::config::app().learn.generate_alternates;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnGenerateAlternates(!current));
}
KeyCode::Char('s') => {
app.finetune_send_approved();
}
KeyCode::Char('+') | KeyCode::Char('=') => {
// Raise threshold 10× (less sensitive — fewer candidates)
if let Some(ms) = &app.mind_state {
let new = ms.learn_threshold * 10.0;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnThreshold(new));
}
// Raise threshold 10× (less sensitive — fewer candidates).
let new = crate::config::app().learn.threshold * 10.0;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnThreshold(new));
}
KeyCode::Char('-') => {
// Lower threshold 10× (more sensitive — more candidates)
if let Some(ms) = &app.mind_state {
let new = ms.learn_threshold / 10.0;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnThreshold(new));
}
// Lower threshold 10× (more sensitive — more candidates).
let new = crate::config::app().learn.threshold / 10.0;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnThreshold(new));
}
_ => {}
}
@ -144,8 +140,10 @@ impl ScreenView for LearnScreen {
}
// Now render
let gen_on = crate::subconscious::learn::alternates_enabled();
let threshold = app.mind_state.as_ref().map(|ms| ms.learn_threshold).unwrap_or(0.0);
let (threshold, gen_on) = {
let app_cfg = crate::config::app();
(app_cfg.learn.threshold, app_cfg.learn.generate_alternates)
};
let block = Block::default()
.title_top(Line::from(screen_legend()).left_aligned())
.title_top(Line::from(" learn ").right_aligned())