2026-04-16 00:31:39 -04:00
|
|
|
|
// learn.rs — F6: fine-tuning review screen
|
|
|
|
|
|
//
|
|
|
|
|
|
// Shows responses identified as training candidates (high divergence
|
|
|
|
|
|
// when memories stripped). Queue for review before sending to /finetune.
|
|
|
|
|
|
|
|
|
|
|
|
use ratatui::{
|
|
|
|
|
|
layout::{Constraint, Layout, Rect},
|
|
|
|
|
|
style::{Color, Modifier, Style},
|
|
|
|
|
|
text::{Line, Span},
|
|
|
|
|
|
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
|
|
|
|
|
|
Frame,
|
|
|
|
|
|
};
|
|
|
|
|
|
use ratatui::crossterm::event::{Event, KeyCode, KeyEvent};
|
|
|
|
|
|
|
|
|
|
|
|
use super::{App, ScreenView, screen_legend};
|
|
|
|
|
|
|
|
|
|
|
|
/// A candidate response identified for fine-tuning.
///
/// Produced by the subconscious scoring pass (see module header): responses
/// whose divergence — how different the output would be with memories
/// stripped — crossed the threshold. Reviewed on the F6 screen before being
/// sent to /finetune.
#[derive(Clone, Debug)]
pub struct FinetuneCandidate {
    /// Index in conversation entries.
    pub entry_idx: usize,
    /// Divergence score (higher = more dependent on memories).
    pub divergence: f64,
    /// The assistant response text.
    pub response_text: String,
    /// Status: pending, approved, rejected, sent.
    pub status: CandidateStatus,
    /// Token IDs for context.
    pub context_ids: Vec<u32>,
    /// Token IDs for continuation (what we're training on).
    pub continuation_ids: Vec<u32>,
    /// What the model would have said without memories (if generated).
    pub alternate_text: Option<String>,
    /// Timestamp in nanos — used as unique key for trained-set dedup.
    pub timestamp_ns: i64,
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Review status of a [`FinetuneCandidate`] on the F6 screen.
#[derive(Clone, Debug, PartialEq)]
pub enum CandidateStatus {
    /// Not yet reviewed by the user.
    Pending,
    /// Marked for training ('a' hotkey); eligible for send.
    Approved,
    /// Excluded from training ('r' hotkey).
    Rejected,
    /// Already submitted to the fine-tune endpoint ('s' hotkey).
    Sent,
}
|
|
|
|
|
|
|
|
|
|
|
|
impl From<crate::subconscious::learn::FinetuneCandidate> for FinetuneCandidate {
|
|
|
|
|
|
fn from(c: crate::subconscious::learn::FinetuneCandidate) -> Self {
|
|
|
|
|
|
FinetuneCandidate {
|
|
|
|
|
|
entry_idx: c.entry_idx,
|
|
|
|
|
|
divergence: c.divergence,
|
|
|
|
|
|
response_text: c.response_text,
|
|
|
|
|
|
status: CandidateStatus::Pending,
|
|
|
|
|
|
context_ids: c.context_ids,
|
|
|
|
|
|
continuation_ids: c.continuation_ids,
|
|
|
|
|
|
alternate_text: c.alternate_text,
|
2026-04-16 11:48:37 -04:00
|
|
|
|
timestamp_ns: c.timestamp_ns,
|
2026-04-16 00:31:39 -04:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/// State for the F6 fine-tuning review screen.
pub(crate) struct LearnScreen {
    /// Selection/scroll state for the candidate list widget.
    list_state: ListState,
    /// Channel to the mind task — used to send threshold changes
    /// (`MindCommand::SetLearnThreshold`) from the +/- hotkeys.
    mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
}
|
|
|
|
|
|
|
|
|
|
|
|
impl LearnScreen {
|
learn: F6 screen — scoring stats, ActivityGuard, configurable threshold
Three changes that together reshape the F6 fine-tune-review screen:
1. Finetune scoring reports through the standard agent activity system
instead of a separate finetune_progress String. The previous design
ran an independent progress field that forced a cross-lock dance and
bespoke UI plumbing. start_finetune_scoring now uses start_activity
+ activity.update, so the usual status line and notifications
capture scoring progress uniformly with other background work.
2. MindState gains a FinetuneScoringStats snapshot (responses seen,
above threshold, max divergence, error). The F6 empty screen shows
this instead of a loading message — so after a scoring run that
produced zero candidates, you can see *why* (e.g., max_divergence
below threshold).
3. The divergence threshold is configurable from F6 via +/- hotkeys
(scales by 10×) and persisted to ~/.consciousness/config.json5 via
config_writer::set_learn_threshold. AppConfig grows a learn section
with a threshold field (default 1e-7).
Also: user/mod.rs no longer uses try_lock() for the per-tick
unconscious/mind state sync — we fixed the locking hot paths that
made try_lock necessary, so lock().await is now the right choice.
And subconscious::learn::score_finetune_candidates now returns
(candidates, max_divergence) so the stats can be populated.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 11:49:26 -04:00
|
|
|
|
pub fn new(
|
|
|
|
|
|
mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
|
|
|
|
|
|
) -> Self {
|
2026-04-16 00:31:39 -04:00
|
|
|
|
Self {
|
|
|
|
|
|
list_state: ListState::default(),
|
learn: F6 screen — scoring stats, ActivityGuard, configurable threshold
Three changes that together reshape the F6 fine-tune-review screen:
1. Finetune scoring reports through the standard agent activity system
instead of a separate finetune_progress String. The previous design
ran an independent progress field that forced a cross-lock dance and
bespoke UI plumbing. start_finetune_scoring now uses start_activity
+ activity.update, so the usual status line and notifications
capture scoring progress uniformly with other background work.
2. MindState gains a FinetuneScoringStats snapshot (responses seen,
above threshold, max divergence, error). The F6 empty screen shows
this instead of a loading message — so after a scoring run that
produced zero candidates, you can see *why* (e.g., max_divergence
below threshold).
3. The divergence threshold is configurable from F6 via +/- hotkeys
(scales by 10×) and persisted to ~/.consciousness/config.json5 via
config_writer::set_learn_threshold. AppConfig grows a learn section
with a threshold field (default 1e-7).
Also: user/mod.rs no longer uses try_lock() for the per-tick
unconscious/mind state sync — we fixed the locking hot paths that
made try_lock necessary, so lock().await is now the right choice.
And subconscious::learn::score_finetune_candidates now returns
(candidates, max_divergence) so the stats can be populated.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 11:49:26 -04:00
|
|
|
|
mind_tx,
|
2026-04-16 00:31:39 -04:00
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
fn selected_idx(&self) -> Option<usize> {
|
|
|
|
|
|
self.list_state.selected()
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
impl ScreenView for LearnScreen {
    fn label(&self) -> &'static str { "learn" }

    /// One frame of the F6 screen: consume key events, clamp the selection,
    /// then render the candidate list + detail pane (or the stats/empty view)
    /// plus a one-line hotkey help bar.
    fn tick(&mut self, frame: &mut Frame, area: Rect,
            events: &[Event], app: &mut App) {

        // Handle input first (before borrowing candidates for rendering)
        let candidate_count = app.finetune_candidates.len();
        for event in events {
            if let Event::Key(KeyEvent { code, .. }) = event {
                match code {
                    // Move selection up; saturates at the first row.
                    KeyCode::Up | KeyCode::Char('k') => {
                        let i = self.list_state.selected().unwrap_or(0);
                        self.list_state.select(Some(i.saturating_sub(1)));
                    }
                    // Move selection down; clamped to the last candidate.
                    KeyCode::Down | KeyCode::Char('j') => {
                        let i = self.list_state.selected().unwrap_or(0);
                        let max = candidate_count.saturating_sub(1);
                        self.list_state.select(Some((i + 1).min(max)));
                    }
                    // Approve the highlighted candidate for training.
                    KeyCode::Char('a') => {
                        if let Some(idx) = self.selected_idx() {
                            app.finetune_action(idx, CandidateStatus::Approved);
                        }
                    }
                    // Reject the highlighted candidate.
                    KeyCode::Char('r') => {
                        if let Some(idx) = self.selected_idx() {
                            app.finetune_action(idx, CandidateStatus::Rejected);
                        }
                    }
                    KeyCode::Char('g') => {
                        // Toggle alternate generation and persist
                        let current = crate::subconscious::learn::alternates_enabled();
                        crate::subconscious::learn::set_alternates(!current);
                    }
                    // Send all approved candidates to /finetune.
                    KeyCode::Char('s') => {
                        app.finetune_send_approved();
                    }
                    // '=' accepted too so the hotkey works without Shift.
                    KeyCode::Char('+') | KeyCode::Char('=') => {
                        // Raise threshold 10× (less sensitive — fewer candidates)
                        if let Some(ms) = &app.mind_state {
                            let new = ms.learn_threshold * 10.0;
                            // Send errors ignored: receiver gone means shutdown.
                            let _ = self.mind_tx.send(
                                crate::mind::MindCommand::SetLearnThreshold(new));
                        }
                    }
                    KeyCode::Char('-') => {
                        // Lower threshold 10× (more sensitive — more candidates)
                        if let Some(ms) = &app.mind_state {
                            let new = ms.learn_threshold / 10.0;
                            let _ = self.mind_tx.send(
                                crate::mind::MindCommand::SetLearnThreshold(new));
                        }
                    }
                    _ => {}
                }
            }
        }

        // Ensure selection is valid
        if candidate_count > 0 {
            let sel = self.list_state.selected().unwrap_or(0).min(candidate_count - 1);
            self.list_state.select(Some(sel));
        }

        // Now render
        let gen_on = crate::subconscious::learn::alternates_enabled();
        // Threshold shown in the title; 0.0 fallback when mind state is absent.
        let threshold = app.mind_state.as_ref().map(|ms| ms.learn_threshold).unwrap_or(0.0);
        let title_right = if gen_on {
            format!(" learn [thresh: {:e}] [gen] ", threshold)
        } else {
            format!(" learn [thresh: {:e}] ", threshold)
        };
        let block = Block::default()
            .title_top(Line::from(screen_legend()).left_aligned())
            .title_top(Line::from(title_right).right_aligned())
            .borders(Borders::ALL)
            .border_style(Style::default().fg(Color::Magenta));
        let inner = block.inner(area);
        frame.render_widget(block, area);

        let candidates = &app.finetune_candidates;

        if candidates.is_empty() {
            // No candidates: show last-run scoring stats instead of a list.
            render_empty(frame, inner, app);
        } else {
            // Layout: list on left, detail on right
            let [list_area, detail_area] = Layout::horizontal([
                Constraint::Percentage(40),
                Constraint::Percentage(60),
            ]).areas(inner);

            // Render candidate list
            let items: Vec<ListItem> = candidates.iter().map(|c| {
                // One-character status marker shown in brackets per row.
                let status_char = match c.status {
                    CandidateStatus::Pending => ' ',
                    CandidateStatus::Approved => '+',
                    CandidateStatus::Rejected => '-',
                    CandidateStatus::Sent => '*',
                };
                let style = match c.status {
                    CandidateStatus::Pending => Style::default(),
                    CandidateStatus::Approved => Style::default().fg(Color::Green),
                    CandidateStatus::Rejected => Style::default().fg(Color::DarkGray),
                    CandidateStatus::Sent => Style::default().fg(Color::Cyan),
                };
                ListItem::new(Line::from(vec![
                    Span::styled(format!("[{}] ", status_char), style),
                    Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)),
                    // First line of the response, clipped to fit the column.
                    Span::raw(truncate(&c.response_text, 30)),
                ]))
            }).collect();

            let list = List::new(items)
                .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
                .highlight_style(Style::default().add_modifier(Modifier::REVERSED));
            frame.render_stateful_widget(list, list_area, &mut self.list_state);

            // Render detail for selected candidate
            if let Some(idx) = self.selected_idx() {
                if let Some(candidate) = candidates.get(idx) {
                    render_detail(frame, candidate, detail_area);
                }
            }
        }

        // Render help at bottom (always, even when empty)
        let gen_status = if gen_on { "[on]" } else { "[off]" };
        let help = Line::from(vec![
            Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
            Span::raw("=nav "),
            Span::styled("a", Style::default().fg(Color::Green)),
            Span::raw("=approve "),
            Span::styled("r", Style::default().fg(Color::Red)),
            Span::raw("=reject "),
            Span::styled("g", Style::default().fg(Color::Yellow)),
            Span::raw(format!("=gen{} ", gen_status)),
            Span::styled("s", Style::default().fg(Color::Magenta)),
            Span::raw("=send "),
            Span::styled("+/-", Style::default().fg(Color::Cyan)),
            Span::raw("=thresh "),
        ]);
        // Overwrite the bottom border row of the block with the help line.
        let help_area = Rect {
            y: area.y + area.height - 1,
            height: 1,
            ..area
        };
        frame.render_widget(Paragraph::new(help), help_area);
    }
}
|
|
|
|
|
|
|
learn: F6 screen — scoring stats, ActivityGuard, configurable threshold
Three changes that together reshape the F6 fine-tune-review screen:
1. Finetune scoring reports through the standard agent activity system
instead of a separate finetune_progress String. The previous design
ran an independent progress field that forced a cross-lock dance and
bespoke UI plumbing. start_finetune_scoring now uses start_activity
+ activity.update, so the usual status line and notifications
capture scoring progress uniformly with other background work.
2. MindState gains a FinetuneScoringStats snapshot (responses seen,
above threshold, max divergence, error). The F6 empty screen shows
this instead of a loading message — so after a scoring run that
produced zero candidates, you can see *why* (e.g., max_divergence
below threshold).
3. The divergence threshold is configurable from F6 via +/- hotkeys
(scales by 10×) and persisted to ~/.consciousness/config.json5 via
config_writer::set_learn_threshold. AppConfig grows a learn section
with a threshold field (default 1e-7).
Also: user/mod.rs no longer uses try_lock() for the per-tick
unconscious/mind state sync — we fixed the locking hot paths that
made try_lock necessary, so lock().await is now the right choice.
And subconscious::learn::score_finetune_candidates now returns
(candidates, max_divergence) so the stats can be populated.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-16 11:49:26 -04:00
|
|
|
|
fn render_empty(frame: &mut Frame, inner: Rect, app: &App) {
|
|
|
|
|
|
let mut lines = Vec::new();
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
|
|
|
|
|
|
match app.mind_state.as_ref().and_then(|ms| ms.finetune_last_run.as_ref()) {
|
|
|
|
|
|
Some(stats) => {
|
|
|
|
|
|
lines.push(Line::from(vec![
|
|
|
|
|
|
Span::raw(" Last run: "),
|
|
|
|
|
|
Span::styled(
|
|
|
|
|
|
format!("{}", stats.responses_considered),
|
|
|
|
|
|
Style::default().fg(Color::Cyan),
|
|
|
|
|
|
),
|
|
|
|
|
|
Span::raw(" responses considered, "),
|
|
|
|
|
|
Span::styled(
|
|
|
|
|
|
format!("{}", stats.above_threshold),
|
|
|
|
|
|
Style::default().fg(if stats.above_threshold > 0 { Color::Green } else { Color::DarkGray }),
|
|
|
|
|
|
),
|
|
|
|
|
|
Span::raw(" above threshold, max divergence: "),
|
|
|
|
|
|
Span::styled(
|
|
|
|
|
|
format!("{:.4}", stats.max_divergence),
|
|
|
|
|
|
Style::default().fg(Color::Yellow),
|
|
|
|
|
|
),
|
|
|
|
|
|
]));
|
|
|
|
|
|
if let Some(err) = &stats.error {
|
|
|
|
|
|
lines.push(Line::from(vec![
|
|
|
|
|
|
Span::raw(" "),
|
|
|
|
|
|
Span::styled(
|
|
|
|
|
|
format!("Error: {}", err),
|
|
|
|
|
|
Style::default().fg(Color::Red),
|
|
|
|
|
|
),
|
|
|
|
|
|
]));
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
None => {
|
|
|
|
|
|
lines.push(Line::styled(
|
|
|
|
|
|
" No scoring run yet.",
|
|
|
|
|
|
Style::default().fg(Color::DarkGray),
|
|
|
|
|
|
));
|
|
|
|
|
|
}
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
lines.push(Line::from(""));
|
|
|
|
|
|
lines.push(Line::styled(
|
|
|
|
|
|
" Scoring runs at startup and after each turn.",
|
|
|
|
|
|
Style::default().fg(Color::DarkGray),
|
|
|
|
|
|
));
|
|
|
|
|
|
|
|
|
|
|
|
frame.render_widget(Paragraph::new(lines), inner);
|
|
|
|
|
|
}
|
|
|
|
|
|
|
2026-04-16 00:31:39 -04:00
|
|
|
|
fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) {
|
|
|
|
|
|
let [header_area, content_area] = Layout::vertical([
|
|
|
|
|
|
Constraint::Length(3),
|
|
|
|
|
|
Constraint::Min(1),
|
|
|
|
|
|
]).areas(area);
|
|
|
|
|
|
|
|
|
|
|
|
// Header: divergence, status
|
|
|
|
|
|
let alt_status = if c.alternate_text.is_some() { "yes" } else { "no" };
|
|
|
|
|
|
let header = Paragraph::new(vec![
|
|
|
|
|
|
Line::from(vec![
|
|
|
|
|
|
Span::raw(" divergence: "),
|
|
|
|
|
|
Span::styled(format!("{:.3}", c.divergence), Style::default().fg(Color::Yellow)),
|
|
|
|
|
|
Span::raw(format!(" entry: {} alt: {}", c.entry_idx, alt_status)),
|
|
|
|
|
|
]),
|
|
|
|
|
|
]);
|
|
|
|
|
|
frame.render_widget(header, header_area);
|
|
|
|
|
|
|
|
|
|
|
|
// Content: response and alternate (if available)
|
|
|
|
|
|
let content_block = Block::default()
|
|
|
|
|
|
.borders(Borders::TOP)
|
|
|
|
|
|
.title(" response ");
|
|
|
|
|
|
|
|
|
|
|
|
let text = match &c.alternate_text {
|
|
|
|
|
|
Some(alt) => format!(" {}\n\n─── without memories ───\n\n {}", c.response_text, alt),
|
|
|
|
|
|
None => format!(" {}", c.response_text),
|
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
let content = Paragraph::new(text)
|
|
|
|
|
|
.block(content_block)
|
|
|
|
|
|
.wrap(Wrap { trim: false });
|
|
|
|
|
|
frame.render_widget(content, content_area);
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Clip a string to its first line, and to at most `max` characters
/// (appending "..." when clipped), for display in the candidate list.
///
/// Counts and slices by `char` rather than by byte: the original
/// `&first_line[..max]` sliced at a byte offset and would panic when
/// `max` landed inside a multi-byte UTF-8 character — response text is
/// arbitrary model output, so non-ASCII is expected.
fn truncate(s: &str, max: usize) -> String {
    let first_line = s.lines().next().unwrap_or("");
    // nth(max) yields the byte offset of the (max+1)-th char, i.e. the
    // first char that does NOT fit — a valid boundary to slice at.
    match first_line.char_indices().nth(max) {
        Some((cut, _)) => format!("{}...", &first_line[..cut]),
        None => first_line.to_string(),
    }
}
|