TurnResult: remove text field, simplify oneshot loop

- Remove TurnResult.text (was dead code - Agent::turn handles text internally)
- Simplify run_with_backend to just iterate over steps (Agent::turn loops
  for tool calls and handles empty responses internally)
- Change run/run_shared/run_forked_shared to return Result<(), String>
- Remove AgentResult.output field (no callers used it)
- Stub out legacy text-parsing code (audit, compare) that needs redesign
- Update digest.rs to not depend on text return
- Add level parameter to journal_new/journal_update for digest support

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-12 02:04:50 -04:00
parent ef80398466
commit f00532bdb7
11 changed files with 82 additions and 422 deletions

View file

@@ -466,36 +466,6 @@ enum AgentCmd {
#[arg(long)]
apply: bool,
},
/// Mine conversation for experiential moments to journal
#[command(name = "experience-mine")]
ExperienceMine {
/// Path to JSONL transcript (default: most recent)
jsonl_path: Option<String>,
},
/// Extract atomic facts from conversation transcripts
#[command(name = "fact-mine")]
FactMine {
/// Path to JSONL transcript or directory (with --batch)
path: String,
/// Process all .jsonl files in directory
#[arg(long)]
batch: bool,
/// Show chunks without calling model
#[arg(long)]
dry_run: bool,
/// Write JSON to file (default: stdout)
#[arg(long, short)]
output: Option<String>,
/// Skip transcripts with fewer messages
#[arg(long, default_value_t = 10)]
min_messages: usize,
},
/// Extract facts from a transcript and store directly
#[command(name = "fact-mine-store")]
FactMineStore {
/// Path to JSONL transcript
path: String,
},
/// Run a single agent by name
Run {
/// Agent name (e.g. observation, linker, distill)
@@ -526,19 +496,6 @@ enum AgentCmd {
#[arg(long, default_value_t = 10)]
count: usize,
},
/// Evaluate agent quality by LLM-sorted ranking
#[command(name = "evaluate")]
Evaluate {
/// Number of pairwise matchups to run
#[arg(long, default_value_t = 30)]
matchups: usize,
/// Model to use for comparison (haiku or sonnet)
#[arg(long, default_value = "haiku")]
model: String,
/// Show example comparison prompt without calling LLM
#[arg(long)]
dry_run: bool,
},
}
#[derive(Subcommand)]
@@ -790,16 +747,9 @@ impl Run for AgentCmd {
=> cli::agent::cmd_apply_consolidation(apply, report.as_deref()),
Self::Digest { level } => cmd_digest(level),
Self::DigestLinks { apply } => cli::agent::cmd_digest_links(apply),
Self::ExperienceMine { .. }
=> Err("experience-mine has been removed — use the observation agent instead.".into()),
Self::FactMine { path, batch, dry_run, output, min_messages }
=> cli::agent::cmd_fact_mine(&path, batch, dry_run, output.as_deref(), min_messages),
Self::FactMineStore { path } => cli::agent::cmd_fact_mine_store(&path),
Self::Run { agent, count, target, query, dry_run, local, state_dir }
=> cli::agent::cmd_run_agent(&agent, count, &target, query.as_deref(), dry_run, local, state_dir.as_deref()),
Self::ReplayQueue { count } => cli::agent::cmd_replay_queue(count),
Self::Evaluate { matchups, model, dry_run }
=> cli::agent::cmd_evaluate_agents(matchups, &model, dry_run),
}
}
}