learn: F6 screen — scoring stats, ActivityGuard, configurable threshold

Three changes that together reshape the F6 fine-tune-review screen:

1. Finetune scoring reports through the standard agent activity system
   instead of a separate finetune_progress String. The previous design
   ran an independent progress field that forced a cross-lock dance and
   bespoke UI plumbing. start_finetune_scoring now uses start_activity
   + activity.update, so the usual status line and notifications
   capture scoring progress uniformly with other background work.

2. MindState gains a FinetuneScoringStats snapshot (responses seen,
   above threshold, max divergence, error). The F6 empty screen shows
   this instead of a loading message — so after a scoring run that
   produced zero candidates, you can see *why* (e.g., max_divergence
   below threshold).

3. The divergence threshold is configurable from F6 via the +/- hotkeys
   (each press raises or lowers it by a factor of 10) and is persisted to
   ~/.consciousness/config.json5 via
   config_writer::set_learn_threshold. AppConfig grows a learn section
   with a threshold field (default 1e-7).

Also: user/mod.rs no longer uses try_lock() for the per-tick
unconscious/mind state sync — we fixed the locking hot paths that
made try_lock necessary, so lock().await is now the right choice.
And subconscious::learn::score_finetune_candidates now returns
(candidates, max_divergence) so the stats can be populated.

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-16 11:49:26 -04:00
parent ac40c2cb98
commit e5dd8312c7
5 changed files with 237 additions and 85 deletions

View file

@ -60,12 +60,16 @@ impl From<crate::subconscious::learn::FinetuneCandidate> for FinetuneCandidate {
pub(crate) struct LearnScreen {
list_state: ListState,
mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
}
impl LearnScreen {
pub fn new() -> Self {
pub fn new(
mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
) -> Self {
Self {
list_state: ListState::default(),
mind_tx,
}
}
@ -112,6 +116,22 @@ impl ScreenView for LearnScreen {
KeyCode::Char('s') => {
app.finetune_send_approved();
}
KeyCode::Char('+') | KeyCode::Char('=') => {
// Raise threshold 10× (less sensitive — fewer candidates)
if let Some(ms) = &app.mind_state {
let new = ms.learn_threshold * 10.0;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnThreshold(new));
}
}
KeyCode::Char('-') => {
// Lower threshold 10× (more sensitive — more candidates)
if let Some(ms) = &app.mind_state {
let new = ms.learn_threshold / 10.0;
let _ = self.mind_tx.send(
crate::mind::MindCommand::SetLearnThreshold(new));
}
}
_ => {}
}
}
@ -123,19 +143,13 @@ impl ScreenView for LearnScreen {
self.list_state.select(Some(sel));
}
// Get scoring progress from mind state
let progress = app.mind_state.as_ref()
.map(|ms| ms.finetune_progress.as_str())
.unwrap_or("");
// Now render
let gen_on = crate::subconscious::learn::alternates_enabled();
let title_right = if !progress.is_empty() {
format!(" {} ", progress)
} else if gen_on {
" learn [gen] ".to_string()
let threshold = app.mind_state.as_ref().map(|ms| ms.learn_threshold).unwrap_or(0.0);
let title_right = if gen_on {
format!(" learn [thresh: {:e}] [gen] ", threshold)
} else {
" learn ".to_string()
format!(" learn [thresh: {:e}] ", threshold)
};
let block = Block::default()
.title_top(Line::from(screen_legend()).left_aligned())
@ -148,58 +162,50 @@ impl ScreenView for LearnScreen {
let candidates = &app.finetune_candidates;
if candidates.is_empty() {
let msg = if progress.is_empty() {
" No candidates yet — scoring runs after each turn."
} else {
" Scoring in progress..."
};
frame.render_widget(
Paragraph::new(Line::styled(msg, Style::default().fg(Color::DarkGray))),
inner,
);
return;
}
render_empty(frame, inner, app);
} else {
// Layout: list on left, detail on right
let [list_area, detail_area] = Layout::horizontal([
Constraint::Percentage(40),
Constraint::Percentage(60),
]).areas(inner);
// Layout: list on left, detail on right
let [list_area, detail_area] = Layout::horizontal([
Constraint::Percentage(40),
Constraint::Percentage(60),
]).areas(inner);
// Render candidate list
let items: Vec<ListItem> = candidates.iter().map(|c| {
let status_char = match c.status {
CandidateStatus::Pending => ' ',
CandidateStatus::Approved => '+',
CandidateStatus::Rejected => '-',
CandidateStatus::Sent => '*',
};
let style = match c.status {
CandidateStatus::Pending => Style::default(),
CandidateStatus::Approved => Style::default().fg(Color::Green),
CandidateStatus::Rejected => Style::default().fg(Color::DarkGray),
CandidateStatus::Sent => Style::default().fg(Color::Cyan),
};
ListItem::new(Line::from(vec![
Span::styled(format!("[{}] ", status_char), style),
Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)),
Span::raw(truncate(&c.response_text, 30)),
]))
}).collect();
// Render candidate list
let items: Vec<ListItem> = candidates.iter().map(|c| {
let status_char = match c.status {
CandidateStatus::Pending => ' ',
CandidateStatus::Approved => '+',
CandidateStatus::Rejected => '-',
CandidateStatus::Sent => '*',
};
let style = match c.status {
CandidateStatus::Pending => Style::default(),
CandidateStatus::Approved => Style::default().fg(Color::Green),
CandidateStatus::Rejected => Style::default().fg(Color::DarkGray),
CandidateStatus::Sent => Style::default().fg(Color::Cyan),
};
ListItem::new(Line::from(vec![
Span::styled(format!("[{}] ", status_char), style),
Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)),
Span::raw(truncate(&c.response_text, 30)),
]))
}).collect();
let list = List::new(items)
.block(Block::default().borders(Borders::RIGHT).title(" candidates "))
.highlight_style(Style::default().add_modifier(Modifier::REVERSED));
frame.render_stateful_widget(list, list_area, &mut self.list_state);
let list = List::new(items)
.block(Block::default().borders(Borders::RIGHT).title(" candidates "))
.highlight_style(Style::default().add_modifier(Modifier::REVERSED));
frame.render_stateful_widget(list, list_area, &mut self.list_state);
// Render detail for selected candidate
if let Some(idx) = self.selected_idx() {
if let Some(candidate) = candidates.get(idx) {
render_detail(frame, candidate, detail_area);
// Render detail for selected candidate
if let Some(idx) = self.selected_idx() {
if let Some(candidate) = candidates.get(idx) {
render_detail(frame, candidate, detail_area);
}
}
}
// Render help at bottom
// Render help at bottom (always, even when empty)
let gen_status = if gen_on { "[on]" } else { "[off]" };
let help = Line::from(vec![
Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
Span::raw("=nav "),
@ -208,9 +214,11 @@ impl ScreenView for LearnScreen {
Span::styled("r", Style::default().fg(Color::Red)),
Span::raw("=reject "),
Span::styled("g", Style::default().fg(Color::Yellow)),
Span::raw("=gen "),
Span::raw(format!("=gen{} ", gen_status)),
Span::styled("s", Style::default().fg(Color::Magenta)),
Span::raw("=send "),
Span::raw("=send "),
Span::styled("+/-", Style::default().fg(Color::Cyan)),
Span::raw("=thresh "),
]);
let help_area = Rect {
y: area.y + area.height - 1,
@ -221,6 +229,56 @@ impl ScreenView for LearnScreen {
}
}
/// Render the F6 empty-state view (shown when there are no finetune
/// candidates): a summary of the last scoring run — responses considered,
/// count above the divergence threshold, max divergence, and any recorded
/// error — or a "no run yet" placeholder, plus a hint about when scoring runs.
///
/// `inner` is the area inside the screen's outer block; `app` supplies the
/// optional `mind_state.finetune_last_run` stats snapshot.
fn render_empty(frame: &mut Frame, inner: Rect, app: &App) {
    let mut lines = Vec::new();
    lines.push(Line::from(""));
    match app.mind_state.as_ref().and_then(|ms| ms.finetune_last_run.as_ref()) {
        Some(stats) => {
            // One-line summary of the last scoring run.
            lines.push(Line::from(vec![
                Span::raw(" Last run: "),
                Span::styled(
                    // to_string() is the idiomatic integer stringification
                    // (clippy: useless_format for format!("{}", x)).
                    stats.responses_considered.to_string(),
                    Style::default().fg(Color::Cyan),
                ),
                Span::raw(" responses considered, "),
                Span::styled(
                    stats.above_threshold.to_string(),
                    // Green when anything cleared the threshold; gray when the
                    // run produced zero candidates (the interesting case here).
                    Style::default().fg(if stats.above_threshold > 0 {
                        Color::Green
                    } else {
                        Color::DarkGray
                    }),
                ),
                Span::raw(" above threshold, max divergence: "),
                Span::styled(
                    format!("{:.4}", stats.max_divergence),
                    Style::default().fg(Color::Yellow),
                ),
            ]));
            // Surface any error the scoring run recorded.
            if let Some(err) = &stats.error {
                lines.push(Line::from(vec![
                    Span::raw(" "),
                    Span::styled(
                        format!("Error: {}", err),
                        Style::default().fg(Color::Red),
                    ),
                ]));
            }
        }
        None => {
            lines.push(Line::styled(
                " No scoring run yet.",
                Style::default().fg(Color::DarkGray),
            ));
        }
    }
    lines.push(Line::from(""));
    lines.push(Line::styled(
        " Scoring runs at startup and after each turn.",
        Style::default().fg(Color::DarkGray),
    ));
    frame.render_widget(Paragraph::new(lines), inner);
}
fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) {
let [header_area, content_area] = Layout::vertical([
Constraint::Length(3),

View file

@ -389,7 +389,7 @@ async fn run(
Box::new(crate::user::subconscious::SubconsciousScreen::new()),
Box::new(crate::user::unconscious::UnconsciousScreen::new()),
Box::new(crate::user::thalamus::ThalamusScreen::new()),
Box::new(crate::user::learn::LearnScreen::new()),
Box::new(crate::user::learn::LearnScreen::new(mind_tx.clone())),
];
let mut active_screen: usize = 1; // F-key number
tui::set_screen_legend(tui::screen_legend_from(&*screens));
@ -466,7 +466,8 @@ async fn run(
idle_state.decay_ewma();
app.update_idle(&idle_state);
app.agent_state = mind.subconscious_snapshots().await;
if let Ok(mut unc) = mind.unconscious.try_lock() {
{
let mut unc = mind.unconscious.lock().await;
let toggles: Vec<String> = app.agent_toggles.drain(..).collect();
for name in &toggles {
if mind.subconscious.lock().await.toggle(name).is_none() {
@ -480,10 +481,13 @@ async fn run(
};
app.unconscious_state = unc.snapshots(store_guard.as_deref());
app.graph_health = unc.graph_health.clone();
}
// Sync mind state (finetune candidates, last scoring run, etc.)
{
let ms = mind.shared.lock().unwrap();
// Sync finetune candidates: add new ones, keep existing (preserves approval status)
// Remove sent candidates (already trained, no need to keep)
// Keep only 10 most recent rejected candidates
// Sync finetune candidates: add new ones, keep existing (preserves approval status),
// remove sent candidates, keep only 10 most recent rejected.
app.finetune_candidates.retain(|c| c.status != learn::CandidateStatus::Sent);
for c in &ms.finetune_candidates {
let exists = app.finetune_candidates.iter()
@ -492,7 +496,6 @@ async fn run(
app.finetune_candidates.push(learn::FinetuneCandidate::from(c.clone()));
}
}
// Limit rejected candidates to 10 most recent
let mut rejected: Vec<_> = app.finetune_candidates.iter()
.enumerate()
.filter(|(_, c)| c.status == learn::CandidateStatus::Rejected)