// consciousness/src/config.rs — 594 lines, 21 KiB, Rust
// (web code-view chrome from the scrape, commented out so the file parses)

// config.rs — Unified configuration
//
// Single config file: ~/.consciousness/config.json5
// Memory settings in the "memory" section (Config)
// Agent/backend settings at top level (AppConfig)
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, OnceLock, RwLock};
use anyhow::{Context as _, Result};
use figment::providers::Serialized;
use figment::{Figment, Provider};
use serde::{Deserialize, Serialize};
/// Config file path shared by all loaders.
/// Config file path shared by all loaders.
///
/// Falls back to the current directory (`.`) when no home directory
/// can be determined, so the path is always usable.
pub fn config_path() -> PathBuf {
    let base = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
    base.join(".consciousness/config.json5")
}
// ============================================================
// Memory config (the "memory" section)
// ============================================================
// Global cache for the memory Config. RwLock allows live reload();
// readers take a cheap Arc clone so they never hold the lock long.
static CONFIG: OnceLock<RwLock<Arc<Config>>> = OnceLock::new();
/// Default stream chunk timeout: 60 seconds of silence = timeout.
fn default_stream_timeout() -> u64 {
    60
}

/// Default memory re-scoring interval: one hour.
fn default_scoring_interval_secs() -> u64 {
    3600
}

/// Default number of assistant responses scored per memory.
fn default_scoring_response_window() -> usize {
    100
}

/// Default set of Claude Code hook events that trigger agent cycles.
fn default_surface_hooks() -> Vec<String> {
    ["UserPromptSubmit", "PostToolUse", "Stop"]
        .into_iter()
        .map(String::from)
        .collect()
}
/// Default spreading-activation weight assigned to a node.
fn default_node_weight() -> f64 {
    0.7
}

/// Default per-edge activation decay factor.
fn default_edge_decay() -> f64 {
    0.3
}

/// Default maximum number of hops for spreading activation.
fn default_max_hops() -> u32 {
    3
}

/// Default activation floor below which propagation stops.
fn default_min_activation() -> f64 {
    0.05
}
/// Default location for identity/personality node files.
/// Uses an empty base path when the home directory is unknown.
fn default_identity_dir() -> PathBuf {
    let home = dirs::home_dir().unwrap_or_default();
    home.join(".consciousness/identity")
}
/// Memory-side configuration, parsed from the `memory` section of
/// `~/.consciousness/config.json5`.
///
/// `#[serde(default)]` at the container level means every field falls
/// back to `Config::default()`'s value when absent from the file.
/// NOTE(review): `deserialize_path` is defined elsewhere in this file —
/// presumably it expands `~`/env vars; confirm before relying on it.
#[derive(Debug, Clone, Deserialize)]
#[serde(default)]
pub struct Config {
    /// Root directory for memory node storage.
    #[serde(deserialize_with = "deserialize_path")]
    pub data_dir: PathBuf,
    /// Directory holding identity/personality node files.
    #[serde(default = "default_identity_dir", deserialize_with = "deserialize_path")]
    pub identity_dir: PathBuf,
    /// Directory of Claude Code project transcripts.
    #[serde(deserialize_with = "deserialize_path")]
    pub projects_dir: PathBuf,
    /// Nodes that cannot be deleted or renamed
    #[serde(default)]
    pub protected_nodes: Vec<String>,
    /// Nodes loaded into main session context
    #[serde(default)]
    pub personality_nodes: Vec<String>,
    /// Nodes loaded into subconscious agent context
    #[serde(default)]
    pub agent_nodes: Vec<String>,
    /// Max concurrent LLM calls; clamped to >= 1 on load (see try_load_shared).
    pub llm_concurrency: usize,
    /// Stream chunk timeout in seconds (no data = timeout).
    #[serde(default = "default_stream_timeout")]
    pub api_stream_timeout_secs: u64,
    /// How often to re-score memory nodes (seconds). Default: 3600 (1 hour).
    #[serde(default = "default_scoring_interval_secs")]
    pub scoring_interval_secs: u64,
    /// Number of assistant responses to score per memory. Default: 100.
    #[serde(default = "default_scoring_response_window")]
    pub scoring_response_window: usize,
    /// Which background agent kinds are enabled (e.g. "linker", "distill").
    pub agent_types: Vec<String>,
    /// MCP servers, mirrored from the top-level config section on load.
    #[serde(default)]
    pub mcp_servers: Vec<McpServerConfig>,
    /// LSP servers, mirrored from the top-level config section on load.
    #[serde(default)]
    pub lsp_servers: Vec<LspServerConfig>,
    /// Max conversation bytes to include in surface agent context.
    #[serde(default)]
    pub surface_conversation_bytes: Option<usize>,
    /// Claude Code hook events that trigger agent cycles (surface-observe,
    /// reflect, journal). Read by consciousness-claude/src/hook.rs.
    #[serde(default = "default_surface_hooks")]
    pub surface_hooks: Vec<String>,
    // Spreading activation parameters
    /// Weight assigned to nodes with no explicit weight.
    #[serde(default = "default_node_weight")]
    pub default_node_weight: f64,
    /// Activation decay applied per edge traversal.
    #[serde(default = "default_edge_decay")]
    pub edge_decay: f64,
    /// Maximum hops activation may spread from a seed node.
    #[serde(default = "default_max_hops")]
    pub max_hops: u32,
    /// Activation floor; propagation below this is dropped.
    #[serde(default = "default_min_activation")]
    pub min_activation: f64,
}
impl Default for Config {
fn default() -> Self {
let home = dirs::home_dir().unwrap_or_default();
Self {
data_dir: home.join(".consciousness/memory"),
identity_dir: home.join(".consciousness/identity"),
projects_dir: home.join(".claude/projects"),
protected_nodes: Vec::new(),
personality_nodes: vec!["identity".into(), "core-practices".into()],
agent_nodes: vec!["identity".into(), "core-practices".into()],
llm_concurrency: 1,
api_stream_timeout_secs: default_stream_timeout(),
scoring_interval_secs: default_scoring_interval_secs(),
scoring_response_window: default_scoring_response_window(),
agent_types: vec![
"linker".into(), "organize".into(), "distill".into(),
"separator".into(), "split".into(),
],
surface_conversation_bytes: None,
surface_hooks: default_surface_hooks(),
mcp_servers: vec![],
lsp_servers: vec![],
default_node_weight: default_node_weight(),
edge_decay: default_edge_decay(),
max_hops: default_max_hops(),
min_activation: default_min_activation(),
}
}
}
impl Config {
fn load_from_file() -> Self {
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
Self::try_load_shared().unwrap_or_default()
}
/// Load from shared config. Memory settings in the "memory" section;
/// API settings resolved from models + backend configuration.
fn try_load_shared() -> Option<Self> {
let content = std::fs::read_to_string(config_path()).ok()?;
config_writer: emit pretty multi-line sections, drop json5 crate Previously when append_kvp created a new section or added a key, it stuffed the "\n " separator into the new kvp's wsc.0 (the whitespace between its own key and colon) instead of the prior kvp's wsc.3 (the whitespace after the prior trailing comma). Result looked like: lsp_servers: [...], learn : {generate_alternates : true,},} The writer also didn't set any interior whitespace on the new section's JSONObjectContext, so everything crammed onto one line — `{key: val,}` compact, not `{\n key: val,\n}` multi-line. Rewrote the appender as append_kvp_pretty(object, key, value, inner_indent, outer_indent): - separator between kvps goes in the prior kvp's wsc.3, or if we're the first kvp in a fresh object, in the object's own wsc.0 (after its opening `{`) - new kvp's wsc.3 carries `,\n<outer_indent>` so the parent's closing `}` lands correctly indented - interior indent vs outer indent are both explicit, so we don't have to rewrite this logic every time we add another nesting level New tests: new_section_exact_multiline_layout asserts byte-exact output shape; new_section_and_key_format_cleanly verifies no key wraps to the next line. Prior tests just substring-matched and happily passed on the broken output — that's why this shipped in the first place. Also: dropped the json5 crate dependency. json-five's serde feature (default) provides the same from_str / to_string API. One fewer dependency, and the two were doing the same job. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 13:08:19 -04:00
let root: serde_json::Value = json_five::from_str(&content).ok()?;
let mem_value = root.get("memory")?;
let mut config: Config = serde_json::from_value(mem_value.clone()).ok()?;
config.llm_concurrency = config.llm_concurrency.max(1);
config: unify subconscious API resolution with the main chat path Two parallel backend-resolution paths had drifted apart: - Main chat: AppConfig::resolve_model() → a named BackendConfig in AppConfig.backends - Subconscious / oneshot / context_window(): four skip-serde "cache" fields on Config (memory section) — api_base_url, api_key, api_model, api_context_window — that used to be populated at Config::try_load_shared time by walking memory.agent_model → root.models[name] → root[backend_name] When we renamed `models` to `backends` and collapsed ModelConfig into BackendConfig, the latter chain started silently dereferencing `root.get("models")` → None → no population. Subconscious agents fell through the "API not configured" guard; context_window() started returning 0 (since api_context_window default is u64's 0 now that we don't populate it). It was only visibly working for the main chat. Collapse to one path: - Drop Config.agent_model (duplicate of AppConfig.default_backend) - Drop Config.{api_base_url, api_key, api_model, api_context_window} — no longer populated, no longer needed - Drop default_context_window() — nobody reads the field anymore - Drop the memory-side resolution block in try_load_shared() - Subconscious (mind/unconscious.rs) and oneshot (agent/oneshot.rs) now call load_app() + resolve_model(&app.default_backend) just like the main chat does - context_window() reads from config::app().backends[default_backend] .context_window, defaulting to 128k only if the backend doesn't specify one Side effect: Kent's config file drops agent_model, api_reasoning, journal_days, journal_max — all fields whose Rust counterparts are now gone. (Figment tolerates unknown fields, so leaving them wouldn't have broken anything, but they were lying about what's configurable.) Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 16:02:43 -04:00
// Top-level sections (not inside "memory").
if let Some(servers) = root.get("lsp_servers") {
config.lsp_servers = serde_json::from_value(servers.clone()).unwrap_or_default();
}
if let Some(servers) = root.get("mcp_servers") {
config.mcp_servers = serde_json::from_value(servers.clone()).unwrap_or_default();
}
Some(config)
}
}
/// Get the global memory config (cheap Arc clone).
/// Get the global memory config (cheap Arc clone).
///
/// First call initializes the cache from disk; later calls just clone
/// the Arc out from under a short-lived read lock.
pub fn get() -> Arc<Config> {
    let lock = CONFIG.get_or_init(|| RwLock::new(Arc::new(Config::load_from_file())));
    Arc::clone(&lock.read().unwrap())
}
/// Reload the config from disk. Returns true if changed.
pub fn reload() -> bool {
let lock = CONFIG.get_or_init(|| RwLock::new(Arc::new(Config::load_from_file())));
let new = Config::load_from_file();
let mut current = lock.write().unwrap();
let changed = format!("{:?}", **current) != format!("{:?}", new);
if changed {
*current = Arc::new(new);
}
changed
}
/// Spawn a background thread that watches `~/.consciousness/config.json5`
/// and reloads both the memory Config and the global AppConfig whenever
/// the file changes on disk. Lets edits from vim / F6 hotkeys / manual
/// tweaks land live without restarting the process.
///
/// Errors (no parent dir, watcher setup failure) are logged via dbglog!
/// and the watch is silently skipped — this is best-effort by design.
pub fn watch_config(cli: crate::user::CliArgs) {
    use notify_debouncer_mini::{new_debouncer, notify::RecursiveMode};
    let path = config_path();
    // Watch the parent directory — editors often replace-via-rename, so
    // watching the file itself misses the new inode.
    let Some(parent) = path.parent().map(|p| p.to_path_buf()) else {
        crate::dbglog!("[config] no parent for {}, skipping watch", path.display());
        return;
    };
    std::thread::Builder::new()
        .name("config-watcher".into())
        .spawn(move || {
            let (tx, rx) = std::sync::mpsc::channel();
            // 200 ms debounce collapses editor write bursts into one event.
            let mut debouncer = match new_debouncer(std::time::Duration::from_millis(200), tx) {
                Ok(d) => d,
                Err(e) => {
                    crate::dbglog!("[config] watcher setup failed: {}", e);
                    return;
                }
            };
            if let Err(e) = debouncer.watcher()
                .watch(&parent, RecursiveMode::NonRecursive)
            {
                crate::dbglog!("[config] watch({}) failed: {}", parent.display(), e);
                return;
            }
            crate::dbglog!("[config] watching {}", path.display());
            while let Ok(res) = rx.recv() {
                let Ok(events) = res else { continue; };
                // We watch the whole parent dir; ignore events for other files.
                if !events.iter().any(|e| e.path == path) { continue; }
                // Reload both halves.
                let mem_changed = reload();
                // AppConfig goes through figment so CLI overrides stay applied;
                // a parse failure keeps the previous AppConfig installed.
                let app_changed = match build_figment(&cli).extract::<AppConfig>() {
                    Ok(app) => {
                        install_app(app);
                        true
                    }
                    Err(e) => {
                        crate::dbglog!("[config] reload: AppConfig parse failed: {}", e);
                        false
                    }
                };
                crate::dbglog!("[config] reloaded (memory_changed={}, app_changed={})",
                    mem_changed, app_changed);
            }
        })
        .ok();
}
// ============================================================
// Agent config (top-level settings)
// ============================================================
/// Top-level application config (everything outside the `memory` section).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppConfig {
    /// Display name for the human side of the conversation.
    #[serde(default = "default_user_name")]
    pub user_name: String,
    /// Display name for the assistant side of the conversation.
    #[serde(default = "default_assistant_name")]
    pub assistant_name: String,
    /// Named model endpoints — credentials, base URL, and model id bundled
    /// into one entry per backend. Keyed by name, selected by
    /// `default_backend` or by `--model <name>` on the CLI.
    #[serde(default)]
    pub backends: HashMap<String, BackendConfig>,
    /// Name of the backend used when the CLI doesn't pick one.
    #[serde(default)]
    pub default_backend: String,
    /// Enable debug logging. Required field (no serde default).
    pub debug: bool,
    /// Conversation compaction thresholds. Required field.
    pub compaction: CompactionConfig,
    /// Default-mode-network (background rumination) settings. Required field.
    pub dmn: DmnConfig,
    /// Fine-tuning / learning settings.
    #[serde(default)]
    pub learn: LearnConfig,
    /// MCP servers available to sessions.
    #[serde(default)]
    pub mcp_servers: Vec<McpServerConfig>,
    /// LSP servers available to sessions.
    #[serde(default)]
    pub lsp_servers: Vec<LspServerConfig>,
}
/// One MCP server entry: a named command plus its arguments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct McpServerConfig {
    /// Display/lookup name for the server.
    pub name: String,
    /// Executable to launch.
    pub command: String,
    /// Arguments passed to the command; empty when omitted.
    #[serde(default)]
    pub args: Vec<String>,
}
/// One LSP server entry: a named command, its arguments, and the
/// languages it handles.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LspServerConfig {
    /// Display/lookup name for the server.
    pub name: String,
    /// Executable to launch.
    pub command: String,
    /// Arguments passed to the command; empty when omitted.
    #[serde(default)]
    pub args: Vec<String>,
    #[serde(default)]
    pub languages: Vec<String>, // e.g. ["rust"], ["c", "cpp"]. Empty = auto-detect
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BackendConfig {
/// API key for the backend.
#[serde(default)]
pub api_key: String,
/// Base URL for the backend's OpenAI-compatible endpoint.
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
#[serde(default, skip_serializing_if = "Option::is_none")]
pub base_url: Option<String>,
/// Model identifier sent to the API.
pub model_id: String,
/// Context window size in tokens.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub context_window: Option<usize>,
}
/// Conversation compaction thresholds, as percentages of the context window.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompactionConfig {
    // NOTE(review): presumably "hard" forces compaction and "soft" merely
    // suggests it — semantics live in the compaction caller; confirm there.
    pub hard_threshold_pct: u32,
    pub soft_threshold_pct: u32,
}
/// Default-mode-network (background rumination) settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DmnConfig {
    // Cap on turns per DMN cycle — consumed elsewhere; default 20 (see
    // AppConfig::default).
    pub max_turns: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LearnConfig {
/// Divergence threshold — responses scoring above this become
/// fine-tuning candidates. Lower = more sensitive.
#[serde(default = "default_learn_threshold")]
pub threshold: f64,
config: global writable AppConfig; learn settings live there Runtime-mutable settings (F6's threshold knob, the generate-alternates toggle, anything else that comes along) were ending up as mirrored fields on MindState — each new config setting grew MindState::new's signature and added a clone+sync path. Wrong home. MindState is ephemeral session state, not a config projection. Give AppConfig the same treatment the memory Config has: install it into a global RwLock<AppConfig> at startup via load_app, read through config::app() (returns a read guard), mutate through update_app. The config_writer functions now write to disk AND update the cache atomically, so the one-stop-shop call keeps both in sync. Also while in here: - learn.generate_alternates moves from a sentinel file (~/.consciousness/cache/finetune-alternates, "exists = enabled") into the config under the learn section. On first run with this build, if the sentinel file still exists Mind::new flips the config value to true and removes it. Drops alternates_enabled()/set_alternates(). - Default threshold 0.0000001 → 1.0. With the timestamp filter removed the previous value was letting essentially everything through; 1.0 is a sane "nothing gets through unless you actually want it" default. - score_finetune_candidates takes generate_alternates as a parameter instead of reading a global — caller snapshots the config values once at the top of start_finetune_scoring so the async task doesn't need to hold the config read lock across awaits. - MindState.learn_threshold / learn_generate_alternates gone; the SetLearn* command handlers now just delegate to config_writer. Kent noted RwLock<Arc<AppConfig>> (the pattern used by the memory Config global) is pointless here — nobody needs a snapshot-after- release, reads are short — so this uses a plain RwLock<AppConfig> and returns a read guard. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 12:53:22 -04:00
/// Whether to generate "what would the model have said without
/// memories" alternates alongside each scoring run. Expensive —
/// one full streaming generation per candidate.
#[serde(default)]
pub generate_alternates: bool,
}
/// Default learn threshold. 1.0 means nothing becomes a fine-tuning
/// candidate unless the user deliberately lowers it.
fn default_learn_threshold() -> f64 { 1.0 }
impl Default for LearnConfig {
fn default() -> Self {
config: global writable AppConfig; learn settings live there Runtime-mutable settings (F6's threshold knob, the generate-alternates toggle, anything else that comes along) were ending up as mirrored fields on MindState — each new config setting grew MindState::new's signature and added a clone+sync path. Wrong home. MindState is ephemeral session state, not a config projection. Give AppConfig the same treatment the memory Config has: install it into a global RwLock<AppConfig> at startup via load_app, read through config::app() (returns a read guard), mutate through update_app. The config_writer functions now write to disk AND update the cache atomically, so the one-stop-shop call keeps both in sync. Also while in here: - learn.generate_alternates moves from a sentinel file (~/.consciousness/cache/finetune-alternates, "exists = enabled") into the config under the learn section. On first run with this build, if the sentinel file still exists Mind::new flips the config value to true and removes it. Drops alternates_enabled()/set_alternates(). - Default threshold 0.0000001 → 1.0. With the timestamp filter removed the previous value was letting essentially everything through; 1.0 is a sane "nothing gets through unless you actually want it" default. - score_finetune_candidates takes generate_alternates as a parameter instead of reading a global — caller snapshots the config values once at the top of start_finetune_scoring so the async task doesn't need to hold the config read lock across awaits. - MindState.learn_threshold / learn_generate_alternates gone; the SetLearn* command handlers now just delegate to config_writer. Kent noted RwLock<Arc<AppConfig>> (the pattern used by the memory Config global) is pointless here — nobody needs a snapshot-after- release, reads are short — so this uses a plain RwLock<AppConfig> and returns a read guard. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 12:53:22 -04:00
Self {
threshold: default_learn_threshold(),
generate_alternates: false,
}
}
}
/// Fallback display name for the human side of the conversation.
fn default_user_name() -> String {
    String::from("User")
}

/// Fallback display name for the assistant side of the conversation.
fn default_assistant_name() -> String {
    String::from("Assistant")
}
impl Default for AppConfig {
fn default() -> Self {
Self {
user_name: default_user_name(),
assistant_name: default_assistant_name(),
backends: HashMap::new(),
default_backend: String::new(),
debug: false,
compaction: CompactionConfig {
hard_threshold_pct: 90,
soft_threshold_pct: 80,
},
dmn: DmnConfig { max_turns: 20 },
learn: LearnConfig::default(),
mcp_servers: Vec::new(),
lsp_servers: Vec::new(),
}
}
}
/// Resolved, ready-to-use agent session config.
pub struct SessionConfig {
    /// Base URL of the selected backend's API.
    pub api_base: String,
    /// API key of the selected backend.
    pub api_key: String,
    /// Model identifier sent to the API.
    pub model: String,
    /// Identity/personality nodes as (name, content) pairs.
    pub context_parts: Vec<(String, String)>,
    /// Directory where this session's data is written.
    pub session_dir: PathBuf,
    /// Snapshot of the full AppConfig the session was resolved from.
    pub app: AppConfig,
    /// Disable background agents (surface, observe, scoring)
    pub no_agents: bool,
}
/// A fully resolved model ready to construct an ApiClient.
// allow(dead_code): some fields are only read by certain build configs.
#[allow(dead_code)]
pub struct ResolvedModel {
    /// Backend name this model was resolved from.
    pub name: String,
    /// Base URL of the backend's API endpoint.
    pub api_base: String,
    /// API key for the backend.
    pub api_key: String,
    /// Model identifier sent to the API.
    pub model_id: String,
    /// Context window in tokens, if the backend specifies one.
    pub context_window: Option<usize>,
}
impl AppConfig {
/// Resolve the active backend and assemble prompts into a SessionConfig.
pub async fn resolve(&self, cli: &crate::user::CliArgs) -> Result<SessionConfig> {
if self.backends.is_empty() {
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
anyhow::bail!(
"no backends configured in {}. Add a `backends` section with at least one entry.",
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
config_path().display()
);
}
let name = cli.model.as_deref().unwrap_or(&self.default_backend);
let resolved = self.resolve_model(name)?;
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
let personality_nodes = get().personality_nodes.clone();
let context_parts = crate::mind::identity::personality_nodes(&personality_nodes).await;
let session_dir = dirs::home_dir()
.unwrap_or_else(|| PathBuf::from("."))
.join(".consciousness/agent-sessions");
std::fs::create_dir_all(&session_dir).ok();
// CLI --api-base and --api-key override everything
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
let api_base = cli.api_base.clone().unwrap_or(resolved.api_base);
let api_key = cli.api_key.clone().unwrap_or(resolved.api_key);
Ok(SessionConfig {
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
api_base,
api_key,
model: resolved.model_id,
context_parts,
session_dir,
app: self.clone(),
no_agents: cli.no_agents,
})
}
/// Look up a named backend and resolve its credentials.
pub fn resolve_model(&self, name: &str) -> Result<ResolvedModel> {
let b = self.backends.get(name)
.ok_or_else(|| anyhow::anyhow!(
"Unknown backend '{}'. Available: {}",
name,
self.model_names().join(", "),
))?;
let api_base = b.base_url.clone()
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
.ok_or_else(|| anyhow::anyhow!(
"backends.{}.base_url not set in {}",
name, config_path().display()
config: drop dead code and collapse to a single backend Config had accumulated several obsolete fields, a legacy load path that was just returning defaults, and multi-backend infrastructure that's no longer used. Removed from Config (memory section): - load_legacy_jsonl() — just returned Config::default(), no callers - The legacy-fallback branch in load_from_file - surface_hooks, surface_timeout_secs — zero external readers - scoring_chunk_tokens + default fn — zero external readers - The POC_MEMORY_CONFIG env override note in the header comment (not actually wired up anywhere) Collapsed multi-backend to single-backend: - AppConfig used to carry `anthropic: BackendConfig` and `openrouter: BackendConfig` as required fields plus an optional `deepinfra`, picked between at runtime by name. Only one is ever actually used in any deployment. Collapse to a single `backend: BackendConfig` on AppConfig, drop the multi-backend match logic in resolve_model, drop the top-level `backend: String` selector field, drop the `BackendConfig::resolve` fallback path. - Also drop BackendConfig.model (redundant with ModelConfig.model_id once multi-backend is gone). - ModelConfig.backend field goes — there's only one backend now, no choice to make. Dead prompt_file machinery: - ModelConfig.prompt_file, ResolvedModel.prompt_file, SessionConfig .prompt_file, Agent.prompt_file — nothing in the codebase actually reads the file these strings name. Just passed around and compared. Delete the whole string through every struct. - The "if prompt_file changed on model switch, recompact" branch in user/chat.rs goes too (never fired usefully). Dead memory_project plumbing: - AppConfig.memory_project field, CliArgs.memory_project, the --memory-project CLI flag, the figment merge target, the show_config display line. Nothing reads it anywhere. Dead ContextInfo struct: - `struct ContextInfo` was never constructed — context_info: None was the only initializer. 
The conditional display blocks in user/context.rs that dereferenced it were dead. Behavior change: AppConfig::resolve() now requires a non-empty `models` map and bails with a helpful message if it's missing. The old fallback ("no models? use top-level backend + PromptConfig to build a default") path is gone — it was only kept for symmetry with a mode nobody used. Config file shape: `deepinfra: {...}` → `backend: {...}`, and model entries no longer need `backend:` or `prompt_file:`. Updated ~/.consciousness/config.json5 to match. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 15:41:55 -04:00
))?;
Ok(ResolvedModel {
name: name.to_string(),
api_base,
api_key: b.api_key.clone(),
model_id: b.model_id.clone(),
context_window: b.context_window,
})
}
/// List available backend names, sorted.
pub fn model_names(&self) -> Vec<String> {
    let mut names = Vec::with_capacity(self.backends.len());
    names.extend(self.backends.keys().cloned());
    names.sort();
    names
}
}
// ============================================================
// Figment-based agent config loading
// ============================================================
struct Json5File(PathBuf);
impl Provider for Json5File {
fn metadata(&self) -> figment::Metadata {
figment::Metadata::named(format!("JSON5 file ({})", self.0.display()))
}
fn data(&self) -> figment::Result<figment::value::Map<figment::Profile, figment::value::Dict>> {
match std::fs::read_to_string(&self.0) {
Ok(content) => {
config_writer: emit pretty multi-line sections, drop json5 crate Previously when append_kvp created a new section or added a key, it stuffed the "\n " separator into the new kvp's wsc.0 (the whitespace between its own key and colon) instead of the prior kvp's wsc.3 (the whitespace after the prior trailing comma). Result looked like: lsp_servers: [...], learn : {generate_alternates : true,},} The writer also didn't set any interior whitespace on the new section's JSONObjectContext, so everything crammed onto one line — `{key: val,}` compact, not `{\n key: val,\n}` multi-line. Rewrote the appender as append_kvp_pretty(object, key, value, inner_indent, outer_indent): - separator between kvps goes in the prior kvp's wsc.3, or if we're the first kvp in a fresh object, in the object's own wsc.0 (after its opening `{`) - new kvp's wsc.3 carries `,\n<outer_indent>` so the parent's closing `}` lands correctly indented - interior indent vs outer indent are both explicit, so we don't have to rewrite this logic every time we add another nesting level New tests: new_section_exact_multiline_layout asserts byte-exact output shape; new_section_and_key_format_cleanly verifies no key wraps to the next line. Prior tests just substring-matched and happily passed on the broken output — that's why this shipped in the first place. Also: dropped the json5 crate dependency. json-five's serde feature (default) provides the same from_str / to_string API. One fewer dependency, and the two were doing the same job. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 13:08:19 -04:00
let value: figment::value::Value = json_five::from_str(&content)
.map_err(|e| figment::Error::from(format!("{}: {}", self.0.display(), e)))?;
Serialized::defaults(value).data()
}
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(figment::value::Map::new()),
Err(e) => Err(figment::Error::from(format!("{}: {}", self.0.display(), e))),
}
}
}
/// Merge an optional CLI value into a figment under one or more keys.
/// Expands to a no-op when `$val` is `None`; binds the inner value by
/// reference so the caller keeps ownership of the Option.
macro_rules! merge_opt {
    ($fig:expr, $val:expr, $($key:expr),+) => {
        if let Some(ref v) = $val {
            // One merge per key; later merges take precedence in figment.
            $( $fig = $fig.merge(Serialized::default($key, v)); )+
        }
    };
}
/// Layer the configuration sources: built-in defaults first, then the
/// JSON5 config file, then CLI flags (highest precedence).
fn build_figment(cli: &crate::user::CliArgs) -> Figment {
    let mut figment = Figment::from(Serialized::defaults(AppConfig::default()))
        .merge(Json5File(config_path()));
    merge_opt!(figment, cli.dmn_max_turns, "dmn.max_turns");
    // --debug is a plain bool flag, so merge_opt (Option-based) doesn't apply.
    if cli.debug {
        figment = figment.merge(Serialized::default("debug", true));
    }
    figment
}
/// Load just the AppConfig — no validation, no prompt assembly.
config: global writable AppConfig; learn settings live there Runtime-mutable settings (F6's threshold knob, the generate-alternates toggle, anything else that comes along) were ending up as mirrored fields on MindState — each new config setting grew MindState::new's signature and added a clone+sync path. Wrong home. MindState is ephemeral session state, not a config projection. Give AppConfig the same treatment the memory Config has: install it into a global RwLock<AppConfig> at startup via load_app, read through config::app() (returns a read guard), mutate through update_app. The config_writer functions now write to disk AND update the cache atomically, so the one-stop-shop call keeps both in sync. Also while in here: - learn.generate_alternates moves from a sentinel file (~/.consciousness/cache/finetune-alternates, "exists = enabled") into the config under the learn section. On first run with this build, if the sentinel file still exists Mind::new flips the config value to true and removes it. Drops alternates_enabled()/set_alternates(). - Default threshold 0.0000001 → 1.0. With the timestamp filter removed the previous value was letting essentially everything through; 1.0 is a sane "nothing gets through unless you actually want it" default. - score_finetune_candidates takes generate_alternates as a parameter instead of reading a global — caller snapshots the config values once at the top of start_finetune_scoring so the async task doesn't need to hold the config read lock across awaits. - MindState.learn_threshold / learn_generate_alternates gone; the SetLearn* command handlers now just delegate to config_writer. Kent noted RwLock<Arc<AppConfig>> (the pattern used by the memory Config global) is pointless here — nobody needs a snapshot-after- release, reads are short — so this uses a plain RwLock<AppConfig> and returns a read guard. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 12:53:22 -04:00
/// Also installs the loaded AppConfig into the global cache so
/// `config::app()` is available everywhere.
2026-04-04 02:46:32 -04:00
pub fn load_app(cli: &crate::user::CliArgs) -> Result<(AppConfig, Figment)> {
let figment = build_figment(cli);
let app: AppConfig = figment.extract().context("Failed to load configuration")?;
config: global writable AppConfig; learn settings live there Runtime-mutable settings (F6's threshold knob, the generate-alternates toggle, anything else that comes along) were ending up as mirrored fields on MindState — each new config setting grew MindState::new's signature and added a clone+sync path. Wrong home. MindState is ephemeral session state, not a config projection. Give AppConfig the same treatment the memory Config has: install it into a global RwLock<AppConfig> at startup via load_app, read through config::app() (returns a read guard), mutate through update_app. The config_writer functions now write to disk AND update the cache atomically, so the one-stop-shop call keeps both in sync. Also while in here: - learn.generate_alternates moves from a sentinel file (~/.consciousness/cache/finetune-alternates, "exists = enabled") into the config under the learn section. On first run with this build, if the sentinel file still exists Mind::new flips the config value to true and removes it. Drops alternates_enabled()/set_alternates(). - Default threshold 0.0000001 → 1.0. With the timestamp filter removed the previous value was letting essentially everything through; 1.0 is a sane "nothing gets through unless you actually want it" default. - score_finetune_candidates takes generate_alternates as a parameter instead of reading a global — caller snapshots the config values once at the top of start_finetune_scoring so the async task doesn't need to hold the config read lock across awaits. - MindState.learn_threshold / learn_generate_alternates gone; the SetLearn* command handlers now just delegate to config_writer. Kent noted RwLock<Arc<AppConfig>> (the pattern used by the memory Config global) is pointless here — nobody needs a snapshot-after- release, reads are short — so this uses a plain RwLock<AppConfig> and returns a read guard. Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 12:53:22 -04:00
install_app(app.clone());
Ok((app, figment))
}
// ============================================================
// Global AppConfig cache (writable, for runtime-mutable settings
// like learn.threshold that F6 edits via config_writer).
// ============================================================
static APP_CONFIG: OnceLock<RwLock<AppConfig>> = OnceLock::new();

/// Install (or replace) the process-wide cached AppConfig.
fn install_app(app: AppConfig) {
    // First caller initializes the slot; every caller then overwrites the
    // stored value so repeated loads refresh the cache.
    let cell = APP_CONFIG.get_or_init(|| RwLock::new(app.clone()));
    let mut guard = cell.write().unwrap();
    *guard = app;
}
/// Current AppConfig, held under a read lock. Reads should be brief
/// (no holding across await / long work) to avoid starving writers.
/// Panics if called before load_app — which runs once at startup.
pub fn app() -> std::sync::RwLockReadGuard<'static, AppConfig> {
    let slot = APP_CONFIG
        .get()
        .expect("config::app() called before load_app()");
    slot.read().unwrap()
}
/// Mutate the cached AppConfig in place. Used by config_writer to keep
/// the in-memory view in sync with disk after surgical edits to
/// ~/.consciousness/config.json5.
pub fn update_app(f: impl FnOnce(&mut AppConfig)) {
    let slot = APP_CONFIG.get().expect("update_app before load_app");
    let mut guard = slot.write().unwrap();
    f(&mut *guard);
}
/// Load the full config: figment → AppConfig → resolve backend → assemble prompts.
pub async fn load_session(cli: &crate::user::CliArgs) -> Result<(SessionConfig, Figment)> {
    let (app, figment) = load_app(cli)?;
    let session = app.resolve(cli).await?;
    Ok((session, figment))
}
/// Re-assemble context (reload personality nodes).
pub async fn reload_context() -> Result<Vec<(String, String)>> {
    // Clone the node list out of the memory-config guard before awaiting,
    // so the read lock is not held across the await point.
    let nodes = get().personality_nodes.clone();
    Ok(crate::mind::identity::personality_nodes(&nodes).await)
}
/// Print the effective configuration, annotating each value with the
/// figment provider that supplied it and masking secrets.
pub fn show_config(app: &AppConfig, figment: &Figment) {
    // Mask an API key for display: show at most the first/last 4 bytes.
    // Uses `str::get` instead of `&key[..4]` so a multi-byte (non-ASCII)
    // key that doesn't split on a char boundary falls back to full
    // masking rather than panicking.
    fn mask(key: &str) -> String {
        if key.is_empty() { "(not set)".into() }
        else if key.len() <= 8 { "****".into() }
        else {
            match (key.get(..4), key.get(key.len() - 4..)) {
                (Some(head), Some(tail)) => format!("{}...{}", head, tail),
                _ => "****".into(),
            }
        }
    }
    // Name of the figment provider that supplied `key`, or "default".
    fn src(figment: &Figment, key: &str) -> String {
        figment.find_metadata(key).map_or("default".into(), |m| m.name.to_string())
    }
    println!("# Effective configuration\n");
    println!("user_name: {:?} ({})", app.user_name, src(figment, "user_name"));
    println!("assistant_name: {:?} ({})", app.assistant_name, src(figment, "assistant_name"));
    println!("\ndebug: {} ({})", app.debug, src(figment, "debug"));
    println!("\ncompaction:");
    println!(" hard_threshold_pct: {} ({})", app.compaction.hard_threshold_pct, src(figment, "compaction.hard_threshold_pct"));
    println!(" soft_threshold_pct: {} ({})", app.compaction.soft_threshold_pct, src(figment, "compaction.soft_threshold_pct"));
    println!("\ndmn:");
    println!(" max_turns: {} ({})", app.dmn.max_turns, src(figment, "dmn.max_turns"));
    println!("\ndefault_backend: {:?} ({})", app.default_backend, src(figment, "default_backend"));
    if !app.backends.is_empty() {
        println!("\nbackends:");
        // Sort for stable, diffable output regardless of HashMap order.
        let mut names: Vec<_> = app.backends.keys().cloned().collect();
        names.sort();
        for name in names {
            let b = &app.backends[&name];
            println!(" {}:", name);
            println!(" api_key: {} ({})", mask(&b.api_key), src(figment, &format!("backends.{name}.api_key")));
            if let Some(ref url) = b.base_url {
                println!(" base_url: {:?} ({})", url, src(figment, &format!("backends.{name}.base_url")));
            }
            println!(" model_id: {:?}", b.model_id);
            if let Some(cw) = b.context_window {
                println!(" context_window: {}", cw);
            }
        }
    }
}
// ============================================================
// Helpers
// ============================================================
/// Serde helper: deserialize a path as a string, expanding a leading `~/`.
fn deserialize_path<'de, D: serde::Deserializer<'de>>(d: D) -> Result<PathBuf, D::Error> {
    let raw = <String as serde::Deserialize>::deserialize(d)?;
    Ok(expand_home(&raw))
}
/// Expand a leading `~/` to the user's home directory; other paths pass
/// through unchanged. Only the `~/` prefix form is handled (no `~user`).
pub fn expand_home(path: &str) -> PathBuf {
    match path.strip_prefix("~/") {
        Some(rest) => dirs::home_dir().unwrap_or_default().join(rest),
        None => PathBuf::from(path),
    }
}