config: drop dead code and collapse to a single backend

Config had accumulated several obsolete fields, a legacy load path
that was just returning defaults, and multi-backend infrastructure
that's no longer used.

Removed from Config (memory section):
- load_legacy_jsonl() — just returned Config::default(); its only
  caller was the legacy-fallback branch below
- The legacy-fallback branch in load_from_file
- surface_hooks, surface_timeout_secs — zero external readers
- scoring_chunk_tokens + default fn — zero external readers
- The POC_MEMORY_CONFIG env override note in the header comment
  (not actually wired up anywhere)

Collapsed multi-backend to single-backend (see the sketch after this list):
- AppConfig used to carry `anthropic: BackendConfig` and
  `openrouter: BackendConfig` as required fields plus an optional
  `deepinfra`, picked between at runtime by name. Only one is ever
  actually used in any deployment. Collapse to a single
  `backend: BackendConfig` on AppConfig, drop the multi-backend
  match logic in resolve_model, drop the top-level `backend: String`
  selector field, drop the `BackendConfig::resolve` fallback path.
- Also drop BackendConfig.model (redundant with ModelConfig.model_id
  once multi-backend is gone).
- ModelConfig.backend field goes — there's only one backend now, no
  choice to make.
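
For orientation, the collapsed types end up like this (condensed from
the diff below; derives and unrelated fields elided):

    pub struct BackendConfig {
        #[serde(default)]
        pub api_key: String,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub base_url: Option<String>,
    }

    pub struct ModelConfig {
        /// Model identifier sent to the API.
        pub model_id: String,
        /// Context window size in tokens.
        #[serde(default)]
        pub context_window: Option<usize>,
    }

    pub struct AppConfig {
        /// Credentials for the single model backend.
        #[serde(default)]
        pub backend: BackendConfig,
        #[serde(default)]
        pub models: HashMap<String, ModelConfig>,
        // debug, compaction, dmn, learn, default_model, ... unchanged
    }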

Dead prompt_file machinery:
- ModelConfig.prompt_file, ResolvedModel.prompt_file,
  SessionConfig.prompt_file, Agent.prompt_file — nothing in the
  codebase actually reads the file these strings name; the value was
  just passed around and compared. Delete the field from every
  struct.
- The "if prompt_file changed on model switch, recompact" branch in
  user/chat.rs goes too (never fired usefully).

Dead memory_project plumbing:
- AppConfig.memory_project field, CliArgs.memory_project, the
  --memory-project CLI flag, the figment merge target, the show_config
  display line. Nothing reads it anywhere.

Dead ContextInfo struct:
- `struct ContextInfo` was never constructed — context_info: None
  was the only initializer. The conditional display blocks in
  user/context.rs that dereferenced it were dead.

Behavior change: AppConfig::resolve() now requires at least one entry
in the `models` map and bails with a helpful message when the map is
empty or absent. The old fallback path ("no models? use top-level
backend + PromptConfig to build a default") is gone — it was only
kept for symmetry with a mode nobody used.
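
The new guard, condensed from the diff below:

    if self.models.is_empty() {
        anyhow::bail!(
            "no models configured in {}. Add a `models` section with at least one entry.",
            config_path().display()
        );
    }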

Config file shape: `deepinfra: {...}` → `backend: {...}`, and
model entries no longer need `backend:` or `prompt_file:`. Updated
~/.consciousness/config.json5 to match.
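
Illustratively, the migration looks like this (field names match this
commit; key and model values are placeholders):

    // before
    {
      backend: "deepinfra",
      deepinfra: { api_key: "...", base_url: "https://api.deepinfra.com/v1/openai" },
      models: {
        main: { backend: "deepinfra", model_id: "org/some-model", prompt_file: "POC.md" },
      },
      default_model: "main",
    }

    // after
    {
      backend: { api_key: "...", base_url: "https://api.deepinfra.com/v1/openai" },
      models: {
        main: { model_id: "org/some-model" },
      },
      default_model: "main",
    }

Note that backend.base_url must now be set explicitly: resolve_model
bails if it's missing, since the per-backend default URLs went away
with BackendConfig::resolve.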

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
commit 2989a6afaa
parent 0e6b5dc8be
Author: Kent Overstreet
Date:   2026-04-16 15:41:55 -04:00

8 changed files with 37 additions and 205 deletions


@@ -3,9 +3,6 @@
 // Single config file: ~/.consciousness/config.json5
 // Memory settings in the "memory" section (Config)
 // Agent/backend settings at top level (AppConfig)
-//
-// Legacy fallback: ~/.consciousness/config.jsonl
-// Env override: POC_MEMORY_CONFIG
 
 use std::collections::HashMap;
 use std::path::PathBuf;
@@ -31,7 +28,6 @@ static CONFIG: OnceLock<RwLock<Arc<Config>>> = OnceLock::new();
 
 fn default_context_window() -> usize { 128_000 }
 fn default_stream_timeout() -> u64 { 60 }
-fn default_scoring_chunk_tokens() -> usize { 50_000 }
 fn default_scoring_interval_secs() -> u64 { 3600 } // 1 hour
 fn default_scoring_response_window() -> usize { 100 }
 fn default_node_weight() -> f64 { 0.7 }
@@ -83,9 +79,6 @@ pub struct Config {
     /// Stream chunk timeout in seconds (no data = timeout).
     #[serde(default = "default_stream_timeout")]
     pub api_stream_timeout_secs: u64,
-    /// Max tokens per chunk for memory scoring logprobs calls.
-    #[serde(default = "default_scoring_chunk_tokens")]
-    pub scoring_chunk_tokens: usize,
     /// How often to re-score memory nodes (seconds). Default: 3600 (1 hour).
     #[serde(default = "default_scoring_interval_secs")]
     pub scoring_interval_secs: u64,
@@ -98,15 +91,9 @@ pub struct Config {
     pub mcp_servers: Vec<McpServerConfig>,
     #[serde(default)]
     pub lsp_servers: Vec<LspServerConfig>,
-    /// Surface agent timeout in seconds.
-    #[serde(default)]
-    pub surface_timeout_secs: Option<u32>,
     /// Max conversation bytes to include in surface agent context.
     #[serde(default)]
     pub surface_conversation_bytes: Option<usize>,
-    /// Hook events that trigger the surface agent.
-    #[serde(default)]
-    pub surface_hooks: Vec<String>,
 
     // Spreading activation parameters
     #[serde(default = "default_node_weight")]
@@ -141,7 +128,6 @@ impl Default for Config {
             api_model: None,
             api_context_window: default_context_window(),
             api_stream_timeout_secs: default_stream_timeout(),
-            scoring_chunk_tokens: default_scoring_chunk_tokens(),
             scoring_interval_secs: default_scoring_interval_secs(),
             scoring_response_window: default_scoring_response_window(),
             agent_model: None,
@@ -150,9 +136,7 @@ impl Default for Config {
                 "linker".into(), "organize".into(), "distill".into(),
                 "separator".into(), "split".into(),
             ],
-            surface_timeout_secs: None,
             surface_conversation_bytes: None,
-            surface_hooks: vec![],
             mcp_servers: vec![],
             lsp_servers: vec![],
             default_node_weight: default_node_weight(),
@@ -165,10 +149,7 @@ impl Default for Config {
 
 impl Config {
     fn load_from_file() -> Self {
-        if let Some(config) = Self::try_load_shared() {
-            return config;
-        }
-        Self::load_legacy_jsonl()
+        Self::try_load_shared().unwrap_or_default()
     }
 
     /// Load from shared config. Memory settings in the "memory" section;
@@ -209,11 +190,6 @@ impl Config {
 
         Some(config)
     }
-
-    /// Load from legacy JSONL config — deprecated, just return defaults.
-    fn load_legacy_jsonl() -> Self {
-        Config::default()
-    }
 }
 
 /// Get the global memory config (cheap Arc clone).
@@ -243,19 +219,14 @@ pub fn reload() -> bool {
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct AppConfig {
-    pub backend: String,
-    pub anthropic: BackendConfig,
-    pub openrouter: BackendConfig,
+    /// Credentials for the single model backend.
     #[serde(default)]
-    pub deepinfra: BackendConfig,
-    pub prompts: PromptConfig,
+    pub backend: BackendConfig,
     pub debug: bool,
     pub compaction: CompactionConfig,
     pub dmn: DmnConfig,
     #[serde(default)]
     pub learn: LearnConfig,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub memory_project: Option<PathBuf>,
     #[serde(default)]
     pub models: HashMap<String, ModelConfig>,
     #[serde(default = "default_model_name")]
@@ -288,32 +259,10 @@ pub struct LspServerConfig {
 pub struct BackendConfig {
     #[serde(default)]
     pub api_key: String,
-    #[serde(default)]
-    pub model: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub base_url: Option<String>,
 }
-
-impl BackendConfig {
-    fn resolve(&self, default_base: &str) -> Result<(String, String, String)> {
-        if self.api_key.is_empty() {
-            anyhow::bail!(
-                "No API key. Set it in {} or use --api-key",
-                config_path().display()
-            );
-        }
-        let base = self.base_url.clone()
-            .unwrap_or_else(|| default_base.to_string());
-        Ok((base, self.api_key.clone(), self.model.clone()))
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct PromptConfig {
-    pub anthropic: String,
-    pub other: String,
-}
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct CompactionConfig {
     pub hard_threshold_pct: u32,
@@ -351,13 +300,8 @@ impl Default for LearnConfig {
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ModelConfig {
-    /// Backend name ("anthropic" or "openrouter")
-    pub backend: String,
-    /// Model identifier sent to the API
+    /// Model identifier sent to the API.
     pub model_id: String,
-    /// Instruction file ("CLAUDE.md" or "POC.md").
-    #[serde(default)]
-    pub prompt_file: Option<String>,
     /// Context window size in tokens.
     #[serde(default)]
     pub context_window: Option<usize>,
@@ -366,26 +310,7 @@ pub struct ModelConfig {
 impl Default for AppConfig {
     fn default() -> Self {
         Self {
-            backend: "openrouter".to_string(),
-            anthropic: BackendConfig {
-                api_key: String::new(),
-                model: "claude-opus-4-6-20250918".to_string(),
-                base_url: None,
-            },
-            openrouter: BackendConfig {
-                api_key: String::new(),
-                model: "qwen/qwen3.5-397b-a17b".to_string(),
-                base_url: Some("https://openrouter.ai/api/v1".to_string()),
-            },
-            deepinfra: BackendConfig {
-                api_key: String::new(),
-                model: String::new(),
-                base_url: Some("https://api.deepinfra.com/v1/openai".to_string()),
-            },
-            prompts: PromptConfig {
-                anthropic: "CLAUDE.md".to_string(),
-                other: "POC.md".to_string(),
-            },
+            backend: BackendConfig::default(),
             debug: false,
             compaction: CompactionConfig {
                 hard_threshold_pct: 90,
@@ -393,7 +318,6 @@ impl Default for AppConfig {
             },
             dmn: DmnConfig { max_turns: 20 },
             learn: LearnConfig::default(),
-            memory_project: None,
             models: HashMap::new(),
             default_model: String::new(),
             mcp_servers: Vec::new(),
@@ -409,7 +333,6 @@ pub struct SessionConfig {
     pub api_base: String,
     pub api_key: String,
     pub model: String,
-    pub prompt_file: String,
     /// Identity/personality nodes as (name, content) pairs.
     pub context_parts: Vec<(String, String)>,
     pub session_dir: PathBuf,
@@ -425,37 +348,22 @@ pub struct ResolvedModel {
     pub api_base: String,
     pub api_key: String,
     pub model_id: String,
-    pub prompt_file: String,
     pub context_window: Option<usize>,
 }
 
 impl AppConfig {
 
-    /// Resolve the active backend and assemble prompts into a SessionConfig.
+    /// Resolve the active model and assemble prompts into a SessionConfig.
     pub async fn resolve(&self, cli: &crate::user::CliArgs) -> Result<SessionConfig> {
-        let (api_base, api_key, model, prompt_file);
-        if !self.models.is_empty() {
-            let model_name = cli.model.as_deref().unwrap_or(&self.default_model);
-            let resolved = self.resolve_model(model_name)?;
-            api_base = resolved.api_base;
-            api_key = resolved.api_key;
-            model = resolved.model_id;
-            prompt_file = resolved.prompt_file;
-        } else {
-            let (base, key, mdl) = match self.backend.as_str() {
-                "anthropic" => self.anthropic.resolve("https://api.anthropic.com"),
-                _ => self.openrouter.resolve("https://openrouter.ai/api/v1"),
-            }?;
-            api_base = base;
-            api_key = key;
-            model = mdl;
-            prompt_file = if self.backend == "anthropic" {
-                self.prompts.anthropic.clone()
-            } else {
-                self.prompts.other.clone()
-            };
+        if self.models.is_empty() {
+            anyhow::bail!(
+                "no models configured in {}. Add a `models` section with at least one entry.",
+                config_path().display()
+            );
         }
+        let model_name = cli.model.as_deref().unwrap_or(&self.default_model);
+        let resolved = self.resolve_model(model_name)?;
 
         let personality_nodes = get().personality_nodes.clone();
         let context_parts = crate::mind::identity::personality_nodes(&personality_nodes).await;
 
@@ -465,11 +373,13 @@ impl AppConfig {
         std::fs::create_dir_all(&session_dir).ok();
 
         // CLI --api-base and --api-key override everything
-        let api_base = cli.api_base.clone().unwrap_or(api_base);
-        let api_key = cli.api_key.clone().unwrap_or(api_key);
+        let api_base = cli.api_base.clone().unwrap_or(resolved.api_base);
+        let api_key = cli.api_key.clone().unwrap_or(resolved.api_key);
 
         Ok(SessionConfig {
-            api_base, api_key, model, prompt_file,
+            api_base,
+            api_key,
+            model: resolved.model_id,
             context_parts,
             session_dir,
             app: self.clone(),
@@ -486,39 +396,18 @@ impl AppConfig {
             self.model_names().join(", "),
         ))?;
 
-        let (api_base, api_key) = match model.backend.as_str() {
-            "anthropic" => (
-                self.anthropic.base_url.clone()
-                    .unwrap_or_else(|| "https://api.anthropic.com".to_string()),
-                self.anthropic.api_key.clone(),
-            ),
-            "deepinfra" => (
-                self.deepinfra.base_url.clone()
-                    .unwrap_or_else(|| "https://api.deepinfra.com/v1/openai".to_string()),
-                self.deepinfra.api_key.clone(),
-            ),
-            _ => (
-                self.openrouter.base_url.clone()
-                    .unwrap_or_else(|| "https://openrouter.ai/api/v1".to_string()),
-                self.openrouter.api_key.clone(),
-            ),
-        };
-
-        let prompt_file = model.prompt_file.clone()
-            .unwrap_or_else(|| {
-                if model.backend == "anthropic" {
-                    self.prompts.anthropic.clone()
-                } else {
-                    self.prompts.other.clone()
-                }
-            });
+        let api_base = self.backend.base_url.clone()
+            .ok_or_else(|| anyhow::anyhow!(
+                "backend.base_url not set in {}",
+                config_path().display()
+            ))?;
+        let api_key = self.backend.api_key.clone();
 
         Ok(ResolvedModel {
             name: name.to_string(),
             api_base,
             api_key,
             model_id: model.model_id.clone(),
-            prompt_file,
             context_window: model.context_window,
         })
     }
@@ -567,11 +456,8 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment {
 
     let mut f = Figment::from(Serialized::defaults(AppConfig::default()))
         .merge(Json5File(config_path()));
-    merge_opt!(f, cli.backend, "backend");
-    merge_opt!(f, cli.model, "anthropic.model", "openrouter.model");
-    merge_opt!(f, cli.api_key, "anthropic.api_key", "openrouter.api_key");
-    merge_opt!(f, cli.api_base, "anthropic.base_url", "openrouter.base_url");
-    merge_opt!(f, cli.memory_project, "memory_project");
+    merge_opt!(f, cli.api_key, "backend.api_key");
+    merge_opt!(f, cli.api_base, "backend.base_url");
     merge_opt!(f, cli.dmn_max_turns, "dmn.max_turns");
     if cli.debug {
         f = f.merge(Serialized::default("debug", true));
@@ -646,37 +532,23 @@ pub fn show_config(app: &AppConfig, figment: &Figment) {
     }
 
     println!("# Effective configuration\n");
-    println!("backend: {:?} ({})", app.backend, src(figment, "backend"));
-    for (name, b) in [("anthropic", &app.anthropic), ("openrouter", &app.openrouter)] {
-        println!("\n{}:", name);
-        println!("  api_key: {} ({})", mask(&b.api_key), src(figment, &format!("{name}.api_key")));
-        println!("  model: {:?} ({})", b.model, src(figment, &format!("{name}.model")));
-        if let Some(ref url) = b.base_url {
-            println!("  base_url: {:?} ({})", url, src(figment, &format!("{name}.base_url")));
-        }
+    println!("backend:");
+    println!("  api_key: {} ({})", mask(&app.backend.api_key), src(figment, "backend.api_key"));
+    if let Some(ref url) = app.backend.base_url {
+        println!("  base_url: {:?} ({})", url, src(figment, "backend.base_url"));
     }
-    println!("\nprompts:");
-    println!("  anthropic: {:?} ({})", app.prompts.anthropic, src(figment, "prompts.anthropic"));
-    println!("  other: {:?} ({})", app.prompts.other, src(figment, "prompts.other"));
     println!("\ndebug: {} ({})", app.debug, src(figment, "debug"));
     println!("\ncompaction:");
     println!("  hard_threshold_pct: {} ({})", app.compaction.hard_threshold_pct, src(figment, "compaction.hard_threshold_pct"));
     println!("  soft_threshold_pct: {} ({})", app.compaction.soft_threshold_pct, src(figment, "compaction.soft_threshold_pct"));
     println!("\ndmn:");
     println!("  max_turns: {} ({})", app.dmn.max_turns, src(figment, "dmn.max_turns"));
-    if let Some(ref p) = app.memory_project {
-        println!("\nmemory_project: {:?} ({})", p, src(figment, "memory_project"));
-    }
     println!("\ndefault_model: {:?}", app.default_model);
     if !app.models.is_empty() {
         println!("\nmodels:");
         for (name, m) in &app.models {
             println!("  {}:", name);
-            println!("    backend: {:?}", m.backend);
             println!("    model_id: {:?}", m.model_id);
-            if let Some(ref pf) = m.prompt_file {
-                println!("    prompt_file: {:?}", pf);
-            }
             if let Some(cw) = m.context_window {
                 println!("    context_window: {}", cw);
             }