Compare commits

..

No commits in common. "master" and "master" have entirely different histories.

246 changed files with 2368 additions and 7892 deletions

400
Cargo.lock generated
View file

@ -372,12 +372,6 @@ dependencies = [
"shlex",
]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cfg-if"
version = "1.0.4"
@ -459,16 +453,6 @@ version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570"
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
]
[[package]]
name = "compact_str"
version = "0.9.0"
@ -504,18 +488,15 @@ dependencies = [
"figment",
"futures",
"glob",
"html2md",
"http",
"http-body-util",
"hyper",
"hyper-util",
"imagesize",
"json-five",
"json5",
"libc",
"log",
"memchr",
"memmap2",
"notify-debouncer-mini",
"paste",
"peg",
"ratatui",
@ -1107,25 +1088,6 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
[[package]]
name = "fsevent-sys"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
dependencies = [
"libc",
]
[[package]]
name = "futf"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df420e2e84819663797d1ec6544b13c5be84629e7bb00dc960d6917db2987843"
dependencies = [
"mac",
"new_debug_unreachable",
]
[[package]]
name = "futures"
version = "0.3.32"
@ -1326,34 +1288,6 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "html2md"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cff9891f2e0d9048927fbdfc28b11bf378f6a93c7ba70b23d0fbee9af6071b4"
dependencies = [
"html5ever",
"jni",
"lazy_static",
"markup5ever_rcdom",
"percent-encoding",
"regex",
]
[[package]]
name = "html5ever"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c13771afe0e6e846f1e67d038d4cb29998a6779f93c809212e4e9c32efd244d4"
dependencies = [
"log",
"mac",
"markup5ever",
"proc-macro2",
"quote",
"syn 2.0.117",
]
[[package]]
name = "http"
version = "1.4.0"
@ -1479,12 +1413,6 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "imagesize"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09e54e57b4c48b40f7aec75635392b12b3421fa26fe8b4332e63138ed278459c"
[[package]]
name = "indexmap"
version = "2.14.0"
@ -1525,26 +1453,6 @@ version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb"
[[package]]
name = "inotify"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd5b3eaf1a28b758ac0faa5a4254e8ab2705605496f1b1f3fbbc3988ad73d199"
dependencies = [
"bitflags 2.11.0",
"inotify-sys",
"libc",
]
[[package]]
name = "inotify-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
dependencies = [
"libc",
]
[[package]]
name = "instability"
version = "0.3.12"
@ -1603,48 +1511,6 @@ dependencies = [
"syn 2.0.117",
]
[[package]]
name = "jni"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec"
dependencies = [
"cesu8",
"combine",
"jni-sys 0.3.1",
"log",
"thiserror 1.0.69",
"walkdir",
]
[[package]]
name = "jni-sys"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258"
dependencies = [
"jni-sys 0.4.1",
]
[[package]]
name = "jni-sys"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2"
dependencies = [
"jni-sys-macros",
]
[[package]]
name = "jni-sys-macros"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264"
dependencies = [
"quote",
"syn 2.0.117",
]
[[package]]
name = "jobserver"
version = "0.1.34"
@ -1665,16 +1531,6 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "json-five"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "865f2d01a4549c1fd8c60640c03ae5249eb374cd8cde8b905628d4b1af95c87c"
dependencies = [
"serde",
"unicode-general-category",
]
[[package]]
name = "json5"
version = "1.3.1"
@ -1696,26 +1552,6 @@ dependencies = [
"thiserror 2.0.18",
]
[[package]]
name = "kqueue"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a"
dependencies = [
"kqueue-sys",
"libc",
]
[[package]]
name = "kqueue-sys"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
dependencies = [
"bitflags 1.3.2",
"libc",
]
[[package]]
name = "lab"
version = "0.11.0"
@ -1800,12 +1636,6 @@ dependencies = [
"hashbrown 0.16.1",
]
[[package]]
name = "mac"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
[[package]]
name = "mac_address"
version = "1.1.8"
@ -1832,32 +1662,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "670fdfda89751bc4a84ac13eaa63e205cf0fd22b4c9a5fbfa085b63c1f1d3a30"
[[package]]
name = "markup5ever"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16ce3abbeba692c8b8441d036ef91aea6df8da2c6b6e21c7e14d3c18e526be45"
dependencies = [
"log",
"phf",
"phf_codegen",
"string_cache",
"string_cache_codegen",
"tendril",
]
[[package]]
name = "markup5ever_rcdom"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edaa21ab3701bfee5099ade5f7e1f84553fd19228cf332f13cd6e964bf59be18"
dependencies = [
"html5ever",
"markup5ever",
"tendril",
"xml5ever",
]
[[package]]
name = "memchr"
version = "2.8.0"
@ -1938,12 +1742,6 @@ dependencies = [
"syn 2.0.117",
]
[[package]]
name = "new_debug_unreachable"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086"
[[package]]
name = "nix"
version = "0.29.0"
@ -1976,45 +1774,6 @@ dependencies = [
"memchr",
]
[[package]]
name = "notify"
version = "8.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3"
dependencies = [
"bitflags 2.11.0",
"fsevent-sys",
"inotify",
"kqueue",
"libc",
"log",
"mio",
"notify-types",
"walkdir",
"windows-sys 0.60.2",
]
[[package]]
name = "notify-debouncer-mini"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17849edfaabd9a5fef1c606d99cfc615a8e99f7ac4366406d86c7942a3184cf2"
dependencies = [
"log",
"notify",
"notify-types",
"tempfile",
]
[[package]]
name = "notify-types"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a"
dependencies = [
"bitflags 2.11.0",
]
[[package]]
name = "num-conv"
version = "0.2.1"
@ -2340,12 +2099,6 @@ dependencies = [
"zerocopy",
]
[[package]]
name = "precomputed-hash"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
[[package]]
name = "prettyplease"
version = "0.2.37"
@ -2969,31 +2722,6 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2231b7c3057d5e4ad0156fb3dc807d900806020c5ffa3ee6ff2c8c76fb8520"
[[package]]
name = "string_cache"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f"
dependencies = [
"new_debug_unreachable",
"parking_lot",
"phf_shared",
"precomputed-hash",
"serde",
]
[[package]]
name = "string_cache_codegen"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0"
dependencies = [
"phf_generator",
"phf_shared",
"proc-macro2",
"quote",
]
[[package]]
name = "strsim"
version = "0.11.1"
@ -3083,17 +2811,6 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "tendril"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d24a120c5fc464a3458240ee02c299ebcb9d67b5249c8848b09d639dca8d7bb0"
dependencies = [
"futf",
"mac",
"utf-8",
]
[[package]]
name = "terminfo"
version = "0.9.0"
@ -3667,12 +3384,6 @@ version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142"
[[package]]
name = "unicode-general-category"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b993bddc193ae5bd0d623b49ec06ac3e9312875fdae725a975c51db1cc1677f"
[[package]]
name = "unicode-ident"
version = "1.0.24"
@ -3741,12 +3452,6 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "utf-8"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
[[package]]
name = "utf8parse"
version = "0.2.2"
@ -4089,16 +3794,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets 0.53.5",
"windows-targets",
]
[[package]]
@ -4116,31 +3812,14 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm 0.52.6",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm 0.53.1",
"windows_aarch64_msvc 0.53.1",
"windows_i686_gnu 0.53.1",
"windows_i686_gnullvm 0.53.1",
"windows_i686_msvc 0.53.1",
"windows_x86_64_gnu 0.53.1",
"windows_x86_64_gnullvm 0.53.1",
"windows_x86_64_msvc 0.53.1",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
@ -4149,96 +3828,48 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
[[package]]
name = "wit-bindgen"
version = "0.51.0"
@ -4327,17 +3958,6 @@ dependencies = [
"wasmparser",
]
[[package]]
name = "xml5ever"
version = "0.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bbb26405d8e919bc1547a5aa9abc95cbfa438f04844f5fdd9dc7596b748bf69"
dependencies = [
"log",
"mac",
"markup5ever",
]
[[package]]
name = "yaml-rust"
version = "0.4.5"

View file

@ -20,7 +20,6 @@ edition.workspace = true
[dependencies]
anyhow = "1"
html2md = "0.2"
crossterm = { version = "0.29", features = ["event-stream", "bracketed-paste", "osc52"] }
clap = { version = "4", features = ["derive"] }
figment = { version = "0.10", features = ["env"] }
@ -30,8 +29,7 @@ log = "0.4"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
json-five = "0.3"
notify-debouncer-mini = "0.7"
json5 = "1.3"
ratatui = { version = "0.30", features = ["unstable-rendered-line-info"] }
tui-markdown = { git = "https://github.com/koverstreet/tui-markdown", subdirectory = "tui-markdown" }
@ -69,7 +67,6 @@ hyper-util = { version = "0.1", features = ["tokio"], default-features = false }
http-body-util = "0.1"
bytes = "1"
base64 = "0.22"
imagesize = "0.14"
rustls = "0.23"
tokio-rustls = "0.26"

View file

@ -237,19 +237,11 @@ impl State {
async fn send_privmsg(&mut self, target: &str, msg: &str) -> io::Result<()> {
// Send PRIVMSG, which is used for both private and channel messages.
// Splits into multiple fragments if necessary.
//
// Two constraints:
// 1. IRC max line = 512 bytes including CRLF. The server prepends
// our prefix when relaying: ":nick!~user@host PRIVMSG target :msg\r\n"
// So per-PRIVMSG message content must fit in 512 - overhead.
// 2. Embedded '\n' in the message would be interpreted by the
// server as an end-of-command marker, truncating us. Split
// on newlines first and send each line as its own PRIVMSG.
//
// IRC max line = 512 bytes including CRLF. The server prepends
// our prefix when relaying: ":nick!~user@host PRIVMSG target :msg\r\n"
// User is often ~nick (nick_len + 1). Host is up to 63 bytes.
// Cloaked OFTC hosts can be longer - pad the budget.
let nick_len = self.config.nick.len();
let overhead = 1 + nick_len + 1 + (nick_len + 1) + 1 + 80
let overhead = 1 + nick_len + 2 + nick_len + 1 + 63
+ " PRIVMSG ".len() + target.len() + " :".len() + 2;
let max_msg = 512_usize.saturating_sub(overhead);
@ -257,34 +249,24 @@ impl State {
return Err(io::Error::new(io::ErrorKind::InvalidInput, "target too long"));
}
for line in msg.split('\n') {
let mut remaining = line;
// Empty lines (blank paragraph breaks) can't be sent as empty
// PRIVMSGs - most IRC servers reject them. Skip.
if remaining.is_empty() { continue; }
loop {
let split_at = if remaining.len() <= max_msg {
remaining.len()
} else {
// Find last char boundary at or before max_msg.
let mut i = max_msg;
while i > 0 && !remaining.is_char_boundary(i) { i -= 1; }
// Prefer splitting at a word boundary - look back up to
// max_msg/4 chars for a space. With dense content (code)
// we may not find one; fall back to the char boundary.
let lookback = max_msg / 4;
let bytes = remaining.as_bytes();
let mut j = i;
while j > 0 && (i - j) < lookback && bytes[j - 1] != b' ' {
j -= 1;
}
if j > 0 && bytes[j - 1] == b' ' { j } else { i }
};
let (chunk, rest) = remaining.split_at(split_at);
self.send_raw(&format!("PRIVMSG {target} :{chunk}")).await?;
remaining = rest;
if remaining.is_empty() { break; }
}
// Split on UTF-8 char boundaries
let mut remaining = msg;
while !remaining.is_empty() {
let split_at = if remaining.len() <= max_msg {
remaining.len()
} else {
// Find last char boundary at or before max_msg
let mut i = max_msg;
while i > 0 && !remaining.is_char_boundary(i) { i -= 1; }
// To avoid splitting mid-word, see if there was a space recently
let mut j = i;
while j > 1 && j > i-10 && remaining.as_bytes()[j] != b' ' { j -= 1; }
if remaining.as_bytes()[j] == b' ' { j }
else if i == 0 { max_msg } else { i }
};
let (chunk, rest) = remaining.split_at(split_at);
self.send_raw(&format!("PRIVMSG {target} :{chunk}")).await?;
remaining = rest;
}
Ok(())
}

View file

@ -22,21 +22,6 @@ pub struct Usage {
pub total_tokens: u32,
}
/// Concept-readout manifest returned by the vLLM server's
/// `/v1/readout/manifest` endpoint. Maps the nameless tensor indices
/// in streaming `readout` fields back to concept names and layer
/// indices.
#[derive(Debug, Clone, Deserialize)]
pub struct ReadoutManifest {
pub concepts: Vec<String>,
pub layers: Vec<u32>,
}
/// Per-token per-layer concept projections streamed alongside each
/// sampled token. Shape `[n_layers][n_concepts]`. Named values come
/// from pairing with the manifest fetched at startup.
pub type TokenReadout = Vec<Vec<f32>>;
/// A JoinHandle that aborts its task when dropped.
pub(crate) struct AbortOnDrop(tokio::task::JoinHandle<()>);
@ -60,10 +45,7 @@ pub(crate) struct SamplingParams {
/// One token from the streaming completions API.
pub enum StreamToken {
/// A sampled token, optionally with its per-layer concept readout.
/// `readout` is `None` when the server has readout disabled or
/// returned no readout for this chunk.
Token { id: u32, readout: Option<TokenReadout> },
Token(u32),
Done { usage: Option<Usage> },
Error(String),
}
@ -91,10 +73,9 @@ impl ApiClient {
}
}
pub(crate) fn stream_completion_mm(
pub(crate) fn stream_completion(
&self,
prompt_tokens: &[u32],
images: &[super::context::WireImage],
sampling: SamplingParams,
priority: Option<i32>,
) -> (mpsc::UnboundedReceiver<StreamToken>, AbortOnDrop) {
@ -103,15 +84,12 @@ impl ApiClient {
let api_key = self.api_key.clone();
let model = self.model.clone();
let prompt_tokens = prompt_tokens.to_vec();
let images: Vec<(Vec<u8>, String)> = images.iter()
.map(|i| (i.bytes.clone(), i.mime.clone()))
.collect();
let base_url = self.base_url.clone();
let handle = tokio::spawn(async move {
let result = stream_completions(
&client, &base_url, &api_key, &model,
&prompt_tokens, &images, &tx, sampling, priority,
&prompt_tokens, &tx, sampling, priority,
).await;
if let Err(e) = result {
let _ = tx.send(StreamToken::Error(e.to_string()));
@ -124,32 +102,6 @@ impl ApiClient {
pub fn base_url(&self) -> &str { &self.base_url }
pub fn api_key(&self) -> &str { &self.api_key }
/// Fetch `/v1/readout/manifest` — returns `Ok(Some(..))` if
/// readout is enabled on the server, `Ok(None)` on 404 (disabled),
/// or an error on any other failure.
///
/// Call once at startup and cache the result; the manifest doesn't
/// change during a server run.
pub async fn fetch_readout_manifest(&self) -> Result<Option<ReadoutManifest>> {
let url = format!("{}/readout/manifest", self.base_url);
let auth = format!("Bearer {}", self.api_key);
let response = self
.client
.get_with_headers(&url, &[("Authorization", &auth)])
.await
.map_err(|e| anyhow::anyhow!("readout manifest fetch ({}): {}", url, e))?;
let status = response.status();
if status.as_u16() == 404 {
return Ok(None);
}
if !status.is_success() {
let body = response.text().await.unwrap_or_default();
let n = body.floor_char_boundary(body.len().min(500));
anyhow::bail!("readout manifest HTTP {} ({}): {}", status, url, &body[..n]);
}
Ok(Some(response.json().await?))
}
}
async fn stream_completions(
@ -158,7 +110,6 @@ async fn stream_completions(
api_key: &str,
model: &str,
prompt_tokens: &[u32],
images: &[(Vec<u8>, String)],
tx: &mpsc::UnboundedSender<StreamToken>,
sampling: SamplingParams,
priority: Option<i32>,
@ -175,14 +126,6 @@ async fn stream_completions(
"skip_special_tokens": false,
"stop_token_ids": [super::tokenizer::IM_END],
});
if !images.is_empty() {
use base64::Engine;
let b64 = base64::engine::general_purpose::STANDARD;
let uris: Vec<String> = images.iter()
.map(|(bytes, mime)| format!("data:{};base64,{}", mime, b64.encode(bytes)))
.collect();
request["multi_modal_data"] = serde_json::json!({ "image": uris });
}
if let Some(p) = priority {
request["priority"] = serde_json::json!(p);
}
@ -216,45 +159,17 @@ async fn stream_completions(
};
for choice in choices {
// `readout`, if present, is a nested list
// `[num_tokens][n_layers][n_concepts]`. Parse it once per
// chunk and pair rows with token ids by index — the rows
// are in the same order as `token_ids`.
let readouts: Option<Vec<TokenReadout>> = choice["readout"]
.as_array()
.map(|outer| {
outer.iter().filter_map(|per_token| {
per_token.as_array().map(|layers| {
layers.iter().filter_map(|per_layer| {
per_layer.as_array().map(|vals| {
vals.iter()
.filter_map(|v| v.as_f64().map(|f| f as f32))
.collect::<Vec<f32>>()
})
}).collect::<Vec<Vec<f32>>>()
})
}).collect()
});
if let Some(ids) = choice["token_ids"].as_array() {
for (i, id_val) in ids.iter().enumerate() {
for id_val in ids {
if let Some(id) = id_val.as_u64() {
let readout = readouts
.as_ref()
.and_then(|r| r.get(i).cloned());
let _ = tx.send(StreamToken::Token {
id: id as u32,
readout,
});
let _ = tx.send(StreamToken::Token(id as u32));
}
}
} else if let Some(text) = choice["text"].as_str() {
// Fallback: provider didn't return token_ids, encode locally.
// No readout available in this path — the encoder may
// produce a different token count than the server did.
// Fallback: provider didn't return token_ids, encode locally
if !text.is_empty() {
for id in super::tokenizer::encode(text) {
let _ = tx.send(StreamToken::Token { id, readout: None });
let _ = tx.send(StreamToken::Token(id));
}
}
}

View file

@ -81,33 +81,10 @@ pub enum NodeBody {
Memory { key: String, text: String, score: Option<f64> },
Dmn(String),
// Vision input — rendered as <|vision_start|> <|image_pad|>×N <|vision_end|>.
// `token_count` is N, the count vLLM will compute for this image's grid.
Image {
#[serde(with = "b64_bytes")]
bytes: Vec<u8>,
mime: String,
orig_height: u32,
orig_width: u32,
token_count: u32,
},
// Non-visible (0 tokens in prompt)
Log(String),
}
mod b64_bytes {
use base64::{Engine, engine::general_purpose::STANDARD};
use serde::{Serializer, Deserializer, Deserialize};
pub fn serialize<S: Serializer>(bytes: &[u8], s: S) -> Result<S::Ok, S::Error> {
s.serialize_str(&STANDARD.encode(bytes))
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let s = String::deserialize(d)?;
STANDARD.decode(s).map_err(serde::de::Error::custom)
}
}
/// A leaf node: typed content with cached token IDs.
/// Token IDs are not serialized — they're recomputed on deserialization.
#[derive(Debug, Clone, Serialize)]
@ -115,7 +92,7 @@ pub struct NodeLeaf {
body: NodeBody,
#[serde(skip)]
token_ids: Vec<u32>,
timestamp: DateTime<Utc>,
timestamp: Option<DateTime<Utc>>,
}
impl<'de> Deserialize<'de> for NodeLeaf {
@ -123,10 +100,14 @@ impl<'de> Deserialize<'de> for NodeLeaf {
#[derive(Deserialize)]
struct Raw {
body: NodeBody,
timestamp: DateTime<Utc>,
timestamp: Option<DateTime<Utc>>,
}
let raw = Raw::deserialize(deserializer)?;
let token_ids = raw.body.compute_token_ids();
let token_ids = if raw.body.is_prompt_visible() {
tokenizer::encode(&raw.body.render())
} else {
vec![]
};
Ok(NodeLeaf { body: raw.body, token_ids, timestamp: raw.timestamp })
}
}
@ -138,7 +119,6 @@ pub enum AstNode {
Branch {
role: Role,
children: Vec<AstNode>,
timestamp: DateTime<Utc>,
/// Per-response memory attribution from full scoring matrix.
/// Maps memory key → divergence score for this response.
#[serde(default, skip_serializing_if = "std::collections::BTreeMap::is_empty")]
@ -218,11 +198,7 @@ impl NodeBody {
fn render_into(&self, out: &mut String) {
match self {
Self::Content(text) => out.push_str(text),
Self::Thinking(text) => {
out.push_str("<think>\n");
out.push_str(text);
out.push_str("\n</think>\n");
}
Self::Thinking(_) => {},
Self::Log(_) => {},
Self::ToolCall { name, arguments } => {
out.push_str("<tool_call>\n");
@ -244,13 +220,6 @@ impl NodeBody {
out.push_str(text);
out.push_str("<|im_end|>\n");
}
Self::Image { token_count, .. } => {
out.push_str("<|vision_start|>");
for _ in 0..*token_count {
out.push_str("<|image_pad|>");
}
out.push_str("<|vision_end|>");
}
}
}
@ -262,27 +231,7 @@ impl NodeBody {
}
fn is_prompt_visible(&self) -> bool {
!matches!(self, Self::Log(_))
}
/// Hand-assemble token IDs for body types where running the tokenizer
/// on the rendered text would be needlessly expensive (Image). Falls
/// back to encoding the rendered text for everything else.
fn compute_token_ids(&self) -> Vec<u32> {
if !self.is_prompt_visible() {
return Vec::new();
}
match self {
Self::Image { token_count, .. } => {
let mut ids = Vec::with_capacity(*token_count as usize + 2);
ids.push(tokenizer::VISION_START);
ids.extend(std::iter::repeat(tokenizer::IMAGE_PAD)
.take(*token_count as usize));
ids.push(tokenizer::VISION_END);
ids
}
_ => tokenizer::encode(&self.render()),
}
!matches!(self, Self::Thinking(_) | Self::Log(_))
}
/// The text content of this leaf (for display, not rendering).
@ -292,26 +241,29 @@ impl NodeBody {
| Self::ToolResult(t) | Self::Dmn(t) => t,
Self::ToolCall { name, .. } => name,
Self::Memory { text, .. } => text,
Self::Image { mime, .. } => mime,
}
}
}
impl NodeLeaf {
fn new(body: NodeBody) -> Self {
let token_ids = body.compute_token_ids();
Self { body, token_ids, timestamp: Utc::now() }
let token_ids = if body.is_prompt_visible() {
tokenizer::encode(&body.render())
} else {
vec![]
};
Self { body, token_ids, timestamp: None }
}
pub fn with_timestamp(mut self, ts: DateTime<Utc>) -> Self {
self.timestamp = ts;
self.timestamp = Some(ts);
self
}
pub fn body(&self) -> &NodeBody { &self.body }
pub fn token_ids(&self) -> &[u32] { &self.token_ids }
pub fn tokens(&self) -> usize { self.token_ids.len() }
pub fn timestamp(&self) -> DateTime<Utc> { self.timestamp }
pub fn timestamp(&self) -> Option<DateTime<Utc>> { self.timestamp }
}
impl AstNode {
@ -352,35 +304,16 @@ impl AstNode {
Self::Leaf(NodeLeaf::new(NodeBody::Log(text.into())))
}
/// Build an Image leaf. `token_count` is computed from the image
/// dimensions using Qwen3-VL's resizing rules.
pub fn image(
bytes: Vec<u8>,
mime: impl Into<String>,
orig_height: u32,
orig_width: u32,
) -> Self {
let token_count = qwen3_image_token_count(orig_height, orig_width);
Self::Leaf(NodeLeaf::new(NodeBody::Image {
bytes,
mime: mime.into(),
orig_height,
orig_width,
token_count,
}))
}
// -- Branch constructors --------------------------------------------------
pub fn branch(role: Role, children: Vec<AstNode>) -> Self {
Self::Branch { role, children, timestamp: Utc::now(), memory_scores: Default::default() }
Self::Branch { role, children, memory_scores: Default::default() }
}
pub fn system_msg(text: impl Into<String>) -> Self {
Self::Branch {
role: Role::System,
children: vec![Self::content(text)],
timestamp: Utc::now(),
memory_scores: Default::default(),
}
}
@ -389,7 +322,6 @@ impl AstNode {
Self::Branch {
role: Role::User,
children: vec![Self::content(text)],
timestamp: Utc::now(),
memory_scores: Default::default(),
}
}
@ -399,13 +331,16 @@ impl AstNode {
pub fn retokenize(self) -> Self {
match self {
Self::Leaf(leaf) => {
let token_ids = leaf.body.compute_token_ids();
let token_ids = if leaf.body.is_prompt_visible() {
tokenizer::encode(&leaf.body.render())
} else {
vec![]
};
Self::Leaf(NodeLeaf { token_ids, ..leaf })
}
Self::Branch { role, children, timestamp, memory_scores } => Self::Branch {
Self::Branch { role, children, memory_scores, .. } => Self::Branch {
role,
children: children.into_iter().map(|c| c.retokenize()).collect(),
timestamp,
memory_scores,
},
}
@ -413,8 +348,8 @@ impl AstNode {
pub fn with_timestamp(mut self, ts: DateTime<Utc>) -> Self {
match &mut self {
Self::Leaf(leaf) => leaf.timestamp = ts,
Self::Branch { timestamp, .. } => *timestamp = ts,
Self::Leaf(leaf) => leaf.timestamp = Some(ts),
Self::Branch { .. } => {}
}
self
}
@ -435,7 +370,7 @@ impl AstNode {
/// Short label for the UI.
pub fn label(&self) -> String {
let app = crate::config::app();
let cfg = crate::config::get();
match self {
Self::Branch { role, children, .. } => {
let preview = children.first()
@ -444,8 +379,8 @@ impl AstNode {
.unwrap_or_default();
match role {
Role::System => "system".into(),
Role::User => format!("{}: {}", app.user_name, preview),
Role::Assistant => format!("{}: {}", app.assistant_name, preview),
Role::User => format!("{}: {}", cfg.user_name, preview),
Role::Assistant => format!("{}: {}", cfg.assistant_name, preview),
}
}
Self::Leaf(leaf) => match &leaf.body {
@ -458,8 +393,6 @@ impl AstNode {
None => format!("mem: {}", key),
},
NodeBody::Dmn(_) => "dmn".into(),
NodeBody::Image { orig_height, orig_width, token_count, .. } =>
format!("image: {}x{} ({} tokens)", orig_width, orig_height, token_count),
NodeBody::Log(t) => format!("log: {}", truncate_preview(t, 60)),
},
}
@ -652,17 +585,13 @@ fn drain_safe(buf: &mut String, tag_len: usize) -> String {
}
impl ResponseParser {
/// @in_think: whether the model's output begins inside a <think> block.
/// Set when the prompt was prefilled with "<think>\n" (native thinking
/// mode) so the parser captures reasoning tokens as Thinking until the
/// model emits </think>.
pub fn new(branch_idx: usize, in_think: bool) -> Self {
pub fn new(branch_idx: usize) -> Self {
Self {
branch_idx,
call_counter: 0,
buf: String::new(),
content_parts: Vec::new(),
in_think,
in_think: false,
think_buf: String::new(),
in_tool_call: false,
tool_call_buf: String::new(),
@ -690,12 +619,7 @@ impl ResponseParser {
let mut full_text = String::new();
while let Some(event) = stream.recv().await {
match event {
super::api::StreamToken::Token { id, readout } => {
if let Some(r) = readout {
if let Ok(mut buf) = agent.readout.lock() {
buf.push(id, r);
}
}
super::api::StreamToken::Token(id) => {
let text = super::tokenizer::decode(&[id]);
full_text.push_str(&text);
let mut ctx = agent.context.lock().await;
@ -897,153 +821,6 @@ impl Ast for ContextState {
}
}
/// An image collected from the AST for a request body. The AST stores
/// the pre-expanded token form (N image_pads) for accurate budget
/// accounting; the wire form collapses each Image to a single
/// `<|image_pad|>` between vision bookends and ships the bytes
/// separately as multi_modal_data.
pub struct WireImage {
pub bytes: Vec<u8>,
pub mime: String,
}
fn wire_into(node: &AstNode, tokens: &mut Vec<u32>, images: &mut Vec<WireImage>) {
match node {
AstNode::Leaf(leaf) => match leaf.body() {
NodeBody::Image { bytes, mime, .. } => {
tokens.push(tokenizer::VISION_START);
tokens.push(tokenizer::IMAGE_PAD);
tokens.push(tokenizer::VISION_END);
images.push(WireImage {
bytes: bytes.clone(),
mime: mime.clone(),
});
}
_ => tokens.extend_from_slice(leaf.token_ids()),
},
AstNode::Branch { role, children, .. } => {
tokens.push(tokenizer::IM_START);
tokens.extend(tokenizer::encode(&format!("{}\n", role.as_str())));
for c in children {
wire_into(c, tokens, images);
}
tokens.push(tokenizer::IM_END);
tokens.extend(tokenizer::encode("\n"));
}
}
}
pub fn memory_key(node: &AstNode) -> Option<&str> {
match node {
AstNode::Leaf(leaf) => match leaf.body() {
NodeBody::Memory { key, .. } => Some(key),
_ => None,
},
_ => None,
}
}
pub fn is_memory_node(node: &AstNode) -> bool {
matches!(node, AstNode::Leaf(leaf) if matches!(leaf.body(), NodeBody::Memory { .. }))
}
pub fn is_assistant(node: &AstNode) -> bool {
matches!(node, AstNode::Branch { role: Role::Assistant, .. })
}
/// Concatenate the text of a Branch's Leaf children — what the model
/// actually produced on that turn (Content + Thinking + ToolCall name).
pub fn render_branch_text(children: &[AstNode]) -> String {
children.iter()
.filter_map(|c| match c {
AstNode::Leaf(leaf) => Some(leaf.body().text().to_string()),
_ => None,
})
.collect::<Vec<_>>()
.join("")
}
/// Render the last `max_msgs` user/assistant branches before `idx` as a
/// review-friendly string with `[user]` / `[assistant]` markers.
pub fn render_prior_context(entries: &[AstNode], idx: usize, max_msgs: usize) -> String {
let mut picked: Vec<&AstNode> = Vec::with_capacity(max_msgs);
for i in (0..idx).rev() {
if picked.len() >= max_msgs { break; }
if let AstNode::Branch { role, .. } = &entries[i] {
if matches!(role, Role::User | Role::Assistant) {
picked.push(&entries[i]);
}
}
}
picked.reverse();
let mut out = String::new();
for node in picked {
if let AstNode::Branch { role, children, .. } = node {
let marker = match role {
Role::User => "[user]",
Role::Assistant => "[assistant]",
_ => continue,
};
out.push_str(marker);
out.push('\n');
out.push_str(render_branch_text(children).trim());
out.push_str("\n\n");
}
}
out.trim_end().to_string()
}
impl ContextState {
/// Assemble the prompt in wire form: token stream with a single
/// `<|image_pad|>` per image (vLLM expands back to N), plus the list
/// of images to send as multi_modal_data, plus the (start, end) token
/// positions of each assistant message branch emitted (used by the
/// scoring path as `score_ranges`).
///
/// `conv_range` selects a prefix (or any sub-range) of conversation
/// entries to include — the agent path passes `0..conversation().len()`;
/// scoring / candidate generation pass a prefix up to the entry of
/// interest.
///
/// `skip` is a predicate applied to identity and conversation entries;
/// returning true drops the node from the prompt. The agent path passes
/// `|_| false`; memory-ablation scoring passes e.g. `is_memory_node` or
/// `|n| memory_key(n) == Some(key)`.
pub fn wire_prompt<F>(
&self,
conv_range: std::ops::Range<usize>,
mut skip: F,
) -> (Vec<u32>, Vec<WireImage>, Vec<(usize, usize)>)
where F: FnMut(&AstNode) -> bool,
{
let mut tokens = Vec::new();
let mut images = Vec::new();
let mut assistant_ranges = Vec::new();
for node in self.system() {
wire_into(node, &mut tokens, &mut images);
}
for node in self.identity() {
if skip(node) { continue; }
wire_into(node, &mut tokens, &mut images);
}
for node in self.journal() {
wire_into(node, &mut tokens, &mut images);
}
for node in &self.conversation()[conv_range] {
if skip(node) { continue; }
let start = tokens.len();
let is_asst = matches!(node, AstNode::Branch { role: Role::Assistant, .. });
wire_into(node, &mut tokens, &mut images);
if is_asst {
assistant_ranges.push((start, tokens.len()));
}
}
(tokens, images, assistant_ranges)
}
}
impl ContextState {
fn section_mut(&mut self, section: Section) -> &mut Vec<AstNode> {
match section {
@ -1076,7 +853,11 @@ impl ContextState {
let node = &mut nodes[index];
match node {
AstNode::Leaf(leaf) => {
let token_ids = body.compute_token_ids();
let token_ids = if body.is_prompt_visible() {
tokenizer::encode(&body.render())
} else {
vec![]
};
leaf.body = body;
leaf.token_ids = token_ids;
}
@ -1104,16 +885,6 @@ impl ContextState {
self.section_mut(section).clear();
}
/// Total tokens across every section that gets serialized into the prompt.
/// Cheap sum over cached `node.tokens()`; call this before assembling to
/// decide whether to trim.
pub fn total_tokens(&self) -> usize {
self.system().iter().map(|n| n.tokens()).sum::<usize>()
+ self.identity().iter().map(|n| n.tokens()).sum::<usize>()
+ self.journal().iter().map(|n| n.tokens()).sum::<usize>()
+ self.conversation().iter().map(|n| n.tokens()).sum::<usize>()
}
/// Dedup and trim conversation entries to fit within the context budget.
///
/// Phase 1: Drop duplicate memories (keep last) and DMN entries.
@ -1216,63 +987,8 @@ impl ContextState {
}
}
// ---------------------------------------------------------------------------
// Qwen3-VL image token count
//
// Port of Qwen2VLImageProcessor.smart_resize + image_token_count. We need the
// exact same answer that vLLM's Qwen3VL processor will produce, because the
// token stream in our context must match what vLLM expands `<|image_pad|>`
// to at request time. Constants come from Qwen3.5-27B's preprocessor_config.
// ---------------------------------------------------------------------------
const QWEN3_PATCH_SIZE: u32 = 16;
const QWEN3_MERGE_SIZE: u32 = 2;
const QWEN3_MIN_PIXELS: u64 = 65_536;
const QWEN3_MAX_PIXELS: u64 = 16_777_216;
fn smart_resize(h: u32, w: u32, factor: u32, min_pixels: u64, max_pixels: u64) -> (u32, u32) {
let max_s = h.max(w) as f64;
let min_s = h.min(w) as f64;
assert!(max_s / min_s <= 200.0, "aspect ratio too extreme: {}x{}", h, w);
let fh = h as f64;
let fw = w as f64;
let ff = factor as f64;
let h_bar = ((fh / ff).round() as u32) * factor;
let w_bar = ((fw / ff).round() as u32) * factor;
let total = (h_bar as u64) * (w_bar as u64);
if total > max_pixels {
let beta = ((fh * fw) / max_pixels as f64).sqrt();
let hf = ((fh / beta / ff).floor() as u32) * factor;
let wf = ((fw / beta / ff).floor() as u32) * factor;
(hf.max(factor), wf.max(factor))
} else if total < min_pixels {
let beta = (min_pixels as f64 / (fh * fw)).sqrt();
let hc = ((fh * beta / ff).ceil() as u32) * factor;
let wc = ((fw * beta / ff).ceil() as u32) * factor;
(hc, wc)
} else {
(h_bar, w_bar)
}
}
/// Compute how many `<|image_pad|>` tokens vLLM will emit for an image of
/// the given dimensions. Matches Qwen3VL's feature-size calculation exactly:
/// (grid_h * grid_w) / merge_size^2
/// where (grid_h, grid_w) = resized dims / patch_size.
fn qwen3_image_token_count(orig_h: u32, orig_w: u32) -> u32 {
let factor = QWEN3_PATCH_SIZE * QWEN3_MERGE_SIZE;
let (rh, rw) = smart_resize(orig_h, orig_w, factor, QWEN3_MIN_PIXELS, QWEN3_MAX_PIXELS);
(rh / QWEN3_PATCH_SIZE) * (rw / QWEN3_PATCH_SIZE) / (QWEN3_MERGE_SIZE * QWEN3_MERGE_SIZE)
}
pub fn context_window() -> usize {
let app = crate::config::app();
app.backends.get(&app.default_backend)
.and_then(|b| b.context_window)
.unwrap_or(128_000)
crate::config::get().api_context_window
}
pub fn context_budget_tokens() -> usize {
@ -1377,7 +1093,7 @@ mod tests {
fn parse_into_ctx(chunks: &[&str]) -> (ContextState, Vec<PendingToolCall>) {
let mut ctx = ContextState::new();
ctx.push_no_log(Section::Conversation, AstNode::branch(Role::Assistant, vec![]));
let mut p = ResponseParser::new(0, false);
let mut p = ResponseParser::new(0);
let mut calls = Vec::new();
for chunk in chunks {
// Feed each chunk as a single token (id=0 for tests)
@ -1441,7 +1157,7 @@ mod tests {
let text = "<think>thought</think>response";
let mut ctx = ContextState::new();
ctx.push_no_log(Section::Conversation, AstNode::branch(Role::Assistant, vec![]));
let mut p = ResponseParser::new(0, false);
let mut p = ResponseParser::new(0);
for ch in text.chars() {
p.feed_token(&ch.to_string(), &mut ctx);
}
@ -1457,7 +1173,7 @@ mod tests {
let text = "text<tool_call>\n<function=bash>\n<parameter=command>ls</parameter>\n</function>\n</tool_call>more";
let mut ctx = ContextState::new();
ctx.push_no_log(Section::Conversation, AstNode::branch(Role::Assistant, vec![]));
let mut p = ResponseParser::new(0, false);
let mut p = ResponseParser::new(0);
let mut tool_calls = 0;
for ch in text.chars() {
tool_calls += p.feed_token(&ch.to_string(), &mut ctx).len();
@ -1505,10 +1221,8 @@ mod tests {
AstNode::thinking("hmm"),
AstNode::content("answer"),
]);
// Thinking renders wrapped in <think>...</think> so the model sees
// previous turns' reasoning (Qwen 3.6 style: CoT stays in the
// conversation across turns).
assert_eq!(node.render(), "<|im_start|>assistant\n<think>\nhmm\n</think>\nanswer<|im_end|>\n");
// Thinking renders as empty, content renders as-is
assert_eq!(node.render(), "<|im_start|>assistant\nanswer<|im_end|>\n");
}
#[test]
@ -1587,19 +1301,10 @@ mod tests {
fn test_tokenize_invisible_nodes_are_zero() {
if !init_tokenizer() { return; }
assert_eq!(AstNode::thinking("deep thoughts").tokens(), 0);
assert_eq!(AstNode::log("debug info").tokens(), 0);
}
#[test]
fn test_tokenize_thinking_matches_rendered_tags() {
if !init_tokenizer() { return; }
// Thinking is now prompt-visible (wrapped in <think>...</think>);
// token count must match the rendered wrapping.
let node = AstNode::thinking("deep thoughts");
assert_eq!(node.tokens(), tokenizer::encode(&node.render()).len());
}
#[test]
fn test_tokenize_decode_roundtrip() {
if !init_tokenizer() { return; }
@ -1635,139 +1340,4 @@ mod tests {
assert_token_invariants(node);
assert!(node.tokens() > 0);
}
// -- Timestamp deserialization tests ------------------------------------------
#[test]
fn test_timestamp_null_rejected() {
// Missing/null timestamps used to be accepted via a lenient
// deserialize fallback. Post-migration the schema is strict.
let json = r#"{"Leaf":{"body":{"Content":"hello"},"timestamp":null}}"#;
assert!(serde_json::from_str::<AstNode>(json).is_err());
}
#[test]
fn test_timestamp_missing_rejected() {
let json = r#"{"Leaf":{"body":{"Content":"hello"}}}"#;
assert!(serde_json::from_str::<AstNode>(json).is_err());
}
#[test]
fn test_branch_timestamp_missing_rejected() {
let json = r#"{"Branch":{"role":"User","children":[]}}"#;
assert!(serde_json::from_str::<AstNode>(json).is_err());
}
// -- Image leaf tests ---------------------------------------------------------
#[test]
fn test_smart_resize_within_bounds() {
// Typical case: 1024x768 → rounded to multiples of 32, under max.
let (h, w) = smart_resize(768, 1024, 32, 65_536, 16_777_216);
assert_eq!(h, 768);
assert_eq!(w, 1024);
}
#[test]
fn test_smart_resize_upscales_tiny() {
// 32x32 = 1024 pixels, below min_pixels=65536. Should scale up.
let (h, w) = smart_resize(32, 32, 32, 65_536, 16_777_216);
assert!((h as u64) * (w as u64) >= 65_536,
"resized {}x{} is under min_pixels", h, w);
assert_eq!(h % 32, 0);
assert_eq!(w % 32, 0);
}
#[test]
fn test_smart_resize_downscales_huge() {
// 8000x6000 = 48M pixels, above max_pixels=16M. Should scale down.
let (h, w) = smart_resize(8000, 6000, 32, 65_536, 16_777_216);
assert!((h as u64) * (w as u64) <= 16_777_216,
"resized {}x{} exceeds max_pixels", h, w);
assert_eq!(h % 32, 0);
assert_eq!(w % 32, 0);
}
#[test]
fn test_qwen3_token_count_matches_formula() {
// 512x512 → resized to 512x512 (already multiple of 32, within bounds).
// grid = 32x32, tokens = 32*32/4 = 256.
assert_eq!(qwen3_image_token_count(512, 512), 256);
}
#[test]
fn test_image_render_and_token_ids() {
let node = AstNode::image(vec![0u8, 1, 2, 3], "image/png", 512, 512);
let leaf = node.leaf().unwrap();
// 3 tokens of bookend + 256 image_pad tokens
assert_eq!(leaf.token_ids().len(), 258);
assert_eq!(leaf.token_ids()[0], tokenizer::VISION_START);
assert_eq!(leaf.token_ids()[257], tokenizer::VISION_END);
for pad in &leaf.token_ids()[1..257] {
assert_eq!(*pad, tokenizer::IMAGE_PAD);
}
// Rendered text has the expected bookends.
let rendered = leaf.body().render();
assert!(rendered.starts_with("<|vision_start|>"));
assert!(rendered.ends_with("<|vision_end|>"));
}
#[test]
fn test_wire_prompt_collapses_image_pads() {
let mut ctx = ContextState::new();
ctx.push_no_log(Section::Conversation, AstNode::branch(Role::User, vec![
AstNode::content("look:"),
AstNode::image(vec![0xDE, 0xAD], "image/png", 512, 512),
]));
// AST side: N image_pads + bookends, full budget accounting.
let full = ctx.token_ids();
let n_image_pads_full = full.iter()
.filter(|&&t| t == tokenizer::IMAGE_PAD).count();
assert_eq!(n_image_pads_full, qwen3_image_token_count(512, 512) as usize);
// Wire side: single image_pad, bytes moved to images list.
let (wire, images, _) = ctx.wire_prompt(0..ctx.conversation().len(), |_| false);
let n_image_pads_wire = wire.iter()
.filter(|&&t| t == tokenizer::IMAGE_PAD).count();
assert_eq!(n_image_pads_wire, 1);
assert_eq!(images.len(), 1);
assert_eq!(images[0].bytes, vec![0xDE, 0xAD]);
assert_eq!(images[0].mime, "image/png");
// vision_start/vision_end bookends are preserved in wire form.
assert_eq!(wire.iter().filter(|&&t| t == tokenizer::VISION_START).count(), 1);
assert_eq!(wire.iter().filter(|&&t| t == tokenizer::VISION_END).count(), 1);
}
#[test]
fn test_image_serde_roundtrip() {
let node = AstNode::image(vec![0xDE, 0xAD, 0xBE, 0xEF], "image/png", 64, 64);
let json = serde_json::to_string(&node).unwrap();
// bytes must be base64-encoded in the JSON form
assert!(json.contains("3q2+7w=="));
let back: AstNode = serde_json::from_str(&json).unwrap();
let leaf = back.leaf().unwrap();
match leaf.body() {
NodeBody::Image { bytes, mime, orig_height, orig_width, token_count } => {
assert_eq!(bytes, &[0xDE, 0xAD, 0xBE, 0xEF]);
assert_eq!(mime, "image/png");
assert_eq!(*orig_height, 64);
assert_eq!(*orig_width, 64);
assert_eq!(*token_count, qwen3_image_token_count(64, 64));
}
other => panic!("expected Image, got {:?}", other),
}
// token_ids are recomputed on deserialization
assert_eq!(leaf.token_ids().len(), leaf.tokens());
}
#[test]
fn test_timestamp_present_accepted() {
let json = r#"{"Leaf":{"body":{"Content":"hi"},"timestamp":"2026-04-16T12:00:00Z"}}"#;
let node: AstNode = serde_json::from_str(json).unwrap();
let leaf = node.leaf().unwrap();
assert_eq!(leaf.timestamp().to_rfc3339(),
"2026-04-16T12:00:00+00:00");
}
}

View file

@ -16,7 +16,6 @@
pub mod api;
pub mod context;
pub mod oneshot;
pub mod readout;
pub mod tokenizer;
pub mod tools;
@ -140,14 +139,10 @@ impl DispatchState {
pub struct Agent {
pub client: ApiClient,
pub app_config: crate::config::AppConfig,
pub prompt_file: String,
pub session_id: String,
pub context: crate::Mutex<ContextState>,
pub state: crate::Mutex<AgentState>,
/// Shared landing pad for per-token concept-readout projections
/// streamed from the vLLM server. Populated by the streaming
/// token handler, read by UI screens (amygdala). Manifest is
/// `None` when the server has readout disabled.
pub readout: readout::SharedReadoutBuffer,
}
/// Mutable agent state — behind its own mutex.
@ -178,10 +173,14 @@ pub struct AgentState {
pub pending_dmn_pause: bool,
pub provenance: String,
pub generation: u64,
pub memory_scoring_in_flight: bool,
pub active_tools: tools::ActiveTools,
/// vLLM scheduling priority (lower = higher priority).
/// 0 = interactive, 1 = surface agent, 2 = other subconscious, 10 = unconscious.
pub priority: Option<i32>,
/// Forked agents should not compact on overflow — it blows the
/// KV cache prefix and evicts the step prompts.
pub no_compact: bool,
pub changed: Arc<tokio::sync::Notify>,
}
@ -190,6 +189,7 @@ impl Agent {
client: ApiClient,
personality: Vec<(String, String)>,
app_config: crate::config::AppConfig,
prompt_file: String,
conversation_log: Option<ConversationLog>,
active_tools: tools::ActiveTools,
agent_tools: Vec<tools::Tool>,
@ -217,13 +217,12 @@ impl Agent {
}
let session_id = format!("consciousness-{}", chrono::Utc::now().format("%Y%m%d-%H%M%S"));
let readout = readout::new_shared();
let agent = Arc::new(Self {
client,
app_config,
prompt_file,
session_id,
context: crate::Mutex::new(context),
readout,
state: crate::Mutex::new(AgentState {
tools: agent_tools,
mcp_tools: McpToolAccess::All,
@ -241,39 +240,15 @@ impl Agent {
pending_dmn_pause: false,
provenance: "manual".to_string(),
generation: 0,
memory_scoring_in_flight: false,
active_tools,
priority: Some(0),
no_compact: false,
changed: Arc::new(tokio::sync::Notify::new()),
}),
});
agent.load_startup_journal().await;
// Probe the vLLM server for its readout manifest. Non-fatal:
// if readout isn't enabled the server returns 404 and we
// leave the manifest as None, which disables the amygdala
// screen gracefully.
match agent.client.fetch_readout_manifest().await {
Ok(Some(m)) => {
dbglog!(
"readout manifest: {} concepts, layers={:?}",
m.concepts.len(),
m.layers,
);
if let Ok(mut buf) = agent.readout.lock() {
buf.set_manifest(Some(m));
}
}
Ok(None) => {
dbglog!(
"readout manifest: server has readout disabled (404)"
);
}
Err(e) => {
dbglog!("readout manifest fetch failed: {}", e);
}
}
agent
}
@ -284,14 +259,9 @@ impl Agent {
Arc::new(Self {
client: self.client.clone(),
app_config: self.app_config.clone(),
prompt_file: self.prompt_file.clone(),
session_id: self.session_id.clone(),
context: crate::Mutex::new(ctx),
// Forks get an independent readout buffer. The amygdala
// screen reads the main conscious agent's buffer only;
// subconscious generations (scoring, reflection, etc.)
// shouldn't bleed into the main emotional readout even
// though they hit the same vLLM server.
readout: readout::new_shared(),
state: crate::Mutex::new(AgentState {
tools,
mcp_tools: McpToolAccess::None,
@ -309,42 +279,26 @@ impl Agent {
pending_dmn_pause: false,
provenance: st.provenance.clone(),
generation: 0,
memory_scoring_in_flight: false,
active_tools: tools::ActiveTools::new(),
priority: None,
no_compact: true,
changed: Arc::new(tokio::sync::Notify::new()),
}),
})
}
pub async fn assemble_prompt_tokens(&self) -> Vec<u32> {
self.assemble_prompt().await.0
}
/// Assemble a ready-to-send prompt: token stream in wire form (each
/// image collapsed to a single `<|image_pad|>`) paired with the
/// images to attach as multi_modal_data.
///
/// Pre-send size check: if the context has grown past budget since the
/// last compact (accumulation between turns, a fork's context getting
/// bigger than expected, etc.), trim here rather than letting vLLM
/// reject the request. Client-side tokenization means we already know
/// the exact token count so there's no reason to round-trip an
/// oversize request.
pub async fn assemble_prompt(&self) -> (Vec<u32>, Vec<context::WireImage>) {
let mut ctx = self.context.lock().await;
if ctx.total_tokens() > context::context_budget_tokens() {
ctx.trim_conversation();
}
let ctx = self.context.lock().await;
let st = self.state.lock().await;
let (mut tokens, images, _) =
ctx.wire_prompt(0..ctx.conversation().len(), |_| false);
let mut tokens = ctx.token_ids();
tokens.push(tokenizer::IM_START);
if st.think_native {
tokens.extend(tokenizer::encode("assistant\n<think>\n"));
} else {
tokens.extend(tokenizer::encode("assistant\n"));
}
(tokens, images)
tokens
}
/// Rebuild the tools section of the system prompt from the current tools list.
@ -404,11 +358,10 @@ impl Agent {
let _thinking = start_activity(&agent, "thinking...").await;
let (rx, _stream_guard) = {
let (prompt_tokens, images) = agent.assemble_prompt().await;
let prompt_tokens = agent.assemble_prompt_tokens().await;
let st = agent.state.lock().await;
agent.client.stream_completion_mm(
agent.client.stream_completion(
&prompt_tokens,
&images,
api::SamplingParams {
temperature: st.temperature,
top_p: st.top_p,
@ -456,16 +409,21 @@ impl Agent {
// Check for stream/parse errors
match parser_handle.await {
Ok(Err(e)) => {
if context::is_context_overflow(&e) && overflow_retries < 2 {
overflow_retries += 1;
let msg = format!("context overflow — compacting ({}/2)", overflow_retries);
match &overflow_activity {
Some(a) => a.update(&msg).await,
None => overflow_activity = Some(
start_activity(&agent, &msg).await),
if context::is_context_overflow(&e) {
if agent.state.lock().await.no_compact {
return Err(e);
}
if overflow_retries < 2 {
overflow_retries += 1;
let msg = format!("context overflow — compacting ({}/2)", overflow_retries);
match &overflow_activity {
Some(a) => a.update(&msg).await,
None => overflow_activity = Some(
start_activity(&agent, &msg).await),
}
agent.compact().await;
continue;
}
agent.compact().await;
continue;
}
return Err(e);
}
@ -621,9 +579,20 @@ impl Agent {
}
pub async fn compact(&self) {
// Identity section is left in place — mid-session rebuilds discard
// memory scores. Content edits to personality nodes get picked up at
// the next restart via new() + restore_from_log().
match crate::config::reload_context().await {
Ok(personality) => {
let mut ctx = self.context.lock().await;
// System section (prompt + tools) set by new(), don't touch it
ctx.clear(Section::Identity);
for (name, content) in &personality {
ctx.push_no_log(Section::Identity, AstNode::memory(name, content));
}
}
Err(e) => {
dbglog!("warning: failed to reload identity: {:#}", e);
}
}
self.load_startup_journal().await;
self.context.lock().await.trim_conversation();

View file

@ -183,8 +183,8 @@ fn resolve_prompt(
state: &std::collections::BTreeMap<String, String>,
recently_written: &[String],
) -> String {
let template = template.replace("{assistant_name}",
&crate::config::app().assistant_name);
let cfg = crate::config::get();
let template = template.replace("{assistant_name}", &cfg.assistant_name);
let mut result = String::with_capacity(template.len());
let mut rest = template.as_str();
while let Some(start) = rest.find("{{") {
@ -247,20 +247,25 @@ impl AutoAgent {
&mut self,
bail_fn: Option<&(dyn Fn(usize) -> Result<(), String> + Sync)>,
) -> Result<(), String> {
// Load system prompt + identity from config.
let config = crate::config::get();
let base_url = config.api_base_url.as_deref().unwrap_or("");
let api_key = config.api_key.as_deref().unwrap_or("");
let model = config.api_model.as_deref().unwrap_or("");
if base_url.is_empty() || model.is_empty() {
return Err("API not configured (no base_url or model)".to_string());
}
let client = super::api::ApiClient::new(base_url, api_key, model);
// Load system prompt + identity from config
let cli = crate::user::CliArgs::default();
let (app, _) = crate::config::load_app(&cli)
.map_err(|e| format!("config: {}", e))?;
let resolved = app.resolve_model(&app.default_backend)
.map_err(|e| format!("API not configured: {}", e))?;
let client = super::api::ApiClient::new(
&resolved.api_base, &resolved.api_key, &resolved.model_id);
let personality = crate::config::reload_context()
.await.map_err(|e| format!("config: {}", e))?;
let agent = Agent::new(
client, personality,
app,
app, String::new(),
None,
super::tools::ActiveTools::new(),
super::tools::tools(),
@ -492,20 +497,15 @@ pub async fn run_one_agent(
.map(|s| s.phase.clone()).collect();
// Bail check: if the agent defines a bail script, run it between steps.
// The script also refreshes our pid-file with the current phase — that's
// how concurrent agents know which phase each of us is in.
let bail_script = def.bail.as_ref().map(|name| defs::agents_dir().join(name));
let state_dir_for_bail = state_dir.clone();
// Find our own pid file so we can pass it to the bail script
let our_pid = std::process::id();
let our_pid_file = format!("pid-{}", our_pid);
let step_phases_for_bail = step_phases.clone();
let bail_fn = move |step_idx: usize| -> Result<(), String> {
if let Some(ref script) = bail_script {
let phase = step_phases_for_bail.get(step_idx)
.map(String::as_str).unwrap_or("");
let status = std::process::Command::new(script)
.arg(&our_pid_file)
.arg(phase)
.current_dir(&state_dir_for_bail)
.status()
.map_err(|e| format!("bail script {:?} failed: {}", script, e))?;

View file

@ -1,75 +0,0 @@
// agent/readout.rs — live buffer of concept-readout projections.
//
// The vLLM server projects residual-stream activations onto a fixed
// matrix of concept directions during each decode step and ships the
// result back on every streamed chunk (see
// vllm/docs/features/readout.md). This module owns the client-side
// landing pad: a ring of the last N token projections plus the
// concept/layer mapping fetched from `/v1/readout/manifest` at
// startup.
//
// Readers (UI screens) lock briefly, read a snapshot, release. Writers
// (the streaming token handler) push one entry per token. Intentionally
// a simple Mutex<VecDeque> rather than lock-free — the UI ticks at
// ~15 Hz and the stream at token-rate, contention is nil.
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use super::api::{ReadoutManifest, TokenReadout};
/// Default ring length — at ~30 tok/s this is ~6 seconds of history,
/// enough for the amygdala screen's scrolling display.
const DEFAULT_RING_LEN: usize = 200;
/// One entry in the readout ring: the sampled token and its per-layer
/// concept projection vector.
#[derive(Debug, Clone)]
pub struct ReadoutEntry {
pub token_id: u32,
/// Shape `[n_layers][n_concepts]`.
pub readout: TokenReadout,
}
/// Shared buffer of recent per-token concept projections plus the
/// manifest that names the layer/concept indices. `manifest` is `None`
/// when the server has readout disabled or the fetch failed — callers
/// should treat that as "readout unavailable" and skip rendering.
#[derive(Default)]
pub struct ReadoutBuffer {
pub manifest: Option<ReadoutManifest>,
pub recent: VecDeque<ReadoutEntry>,
pub max_len: usize,
}
impl ReadoutBuffer {
pub fn new() -> Self {
Self {
manifest: None,
recent: VecDeque::with_capacity(DEFAULT_RING_LEN),
max_len: DEFAULT_RING_LEN,
}
}
pub fn set_manifest(&mut self, manifest: Option<ReadoutManifest>) {
self.manifest = manifest;
}
pub fn push(&mut self, token_id: u32, readout: TokenReadout) {
if self.recent.len() >= self.max_len {
self.recent.pop_front();
}
self.recent.push_back(ReadoutEntry { token_id, readout });
}
pub fn is_enabled(&self) -> bool {
self.manifest.is_some()
}
}
/// A thread-safe handle.
pub type SharedReadoutBuffer = Arc<Mutex<ReadoutBuffer>>;
pub fn new_shared() -> SharedReadoutBuffer {
Arc::new(Mutex::new(ReadoutBuffer::new()))
}

View file

@ -16,9 +16,6 @@ static TOKENIZER: OnceLock<Tokenizer> = OnceLock::new();
/// Special token IDs for Qwen 3.5
pub const IM_START: u32 = 248045;
pub const IM_END: u32 = 248046;
pub const VISION_START: u32 = 248053;
pub const VISION_END: u32 = 248054;
pub const IMAGE_PAD: u32 = 248056;
/// Initialize the global tokenizer from a file path.
/// Call once at startup. Panics if the file can't be loaded.

View file

@ -242,7 +242,13 @@ pub fn summarize_args(tool_name: &str, args: &serde_json::Value) -> String {
.as_str()
.unwrap_or("")
.to_string(),
"view_image" => args["file_path"].as_str().unwrap_or("").to_string(),
"view_image" => {
if let Some(pane) = args["pane_id"].as_str() {
format!("pane {}", pane)
} else {
args["file_path"].as_str().unwrap_or("").to_string()
}
}
"journal" => {
let entry = args["entry"].as_str().unwrap_or("");
if entry.len() > 60 {

View file

@ -1,71 +1,96 @@
use std::sync::Arc;
// tools/vision.rs — Image viewing tool
//
// Reads an image file from disk, decodes its dimensions, and injects it
// into the context as a user-role message containing a NodeBody::Image
// leaf. The leaf carries raw bytes; the API layer extracts them into
// multi_modal_data when building vLLM requests.
use std::sync::Arc;
// Reads image files from disk and returns them as base64 data URIs
// for multimodal models. Also supports capturing tmux pane contents
// as screenshots.
use anyhow::{Context, Result};
use base64::Engine;
use serde::Deserialize;
use crate::agent::context::{AstNode, Role, Section};
#[derive(Deserialize)]
struct Args {
file_path: String,
file_path: Option<String>,
pane_id: Option<String>,
#[serde(default = "default_lines")]
lines: usize,
}
fn default_lines() -> usize { 50 }
pub fn tool() -> super::Tool {
super::Tool {
name: "view_image",
description: "View an image file. Supports PNG, JPEG, GIF, WebP, BMP. The image is inserted into the conversation and can be analyzed by the vision model.",
parameters_json: r#"{"type":"object","properties":{"file_path":{"type":"string","description":"Path to the image file"}},"required":["file_path"]}"#,
handler: Arc::new(|agent, v| Box::pin(async move {
view_image(agent, v).await
})),
description: "View an image file or capture a tmux pane screenshot. Supports PNG, JPEG, GIF, WebP. Use pane_id to capture a tmux pane instead.",
parameters_json: r#"{"type":"object","properties":{"file_path":{"type":"string","description":"Path to an image file"},"pane_id":{"type":"string","description":"Tmux pane ID to capture (e.g. '0:1.0')"},"lines":{"type":"integer","description":"Lines to capture from tmux pane (default 50)"}}}"#,
handler: Arc::new(|_a, v| Box::pin(async move { view_image_text(&v) })),
}
}
const MAX_SIZE: usize = 20 * 1024 * 1024;
async fn view_image(
agent: Option<Arc<crate::agent::Agent>>,
args: serde_json::Value,
) -> Result<String> {
let a: Args = serde_json::from_value(args)
fn view_image_text(args: &serde_json::Value) -> anyhow::Result<String> {
let a: Args = serde_json::from_value(args.clone())
.context("invalid view_image arguments")?;
let path = std::path::Path::new(&a.file_path);
if !path.exists() {
anyhow::bail!("file not found: {}", a.file_path);
if let Some(ref pane_id) = a.pane_id {
return capture_tmux_pane(pane_id, a.lines);
}
let bytes = std::fs::read(path)
.with_context(|| format!("reading {}", a.file_path))?;
let file_path = a.file_path
.as_deref()
.context("view_image requires either file_path or pane_id")?;
if bytes.len() > MAX_SIZE {
let path = std::path::Path::new(file_path);
if !path.exists() {
anyhow::bail!("File not found: {}", file_path);
}
let data = std::fs::read(path).with_context(|| format!("Failed to read {}", file_path))?;
// Sanity check file size (don't send huge images)
const MAX_SIZE: usize = 20 * 1024 * 1024; // 20 MB
if data.len() > MAX_SIZE {
anyhow::bail!(
"image too large: {} bytes (max {} MB)",
bytes.len(), MAX_SIZE / (1024 * 1024),
"Image too large: {} bytes (max {} MB)",
data.len(),
MAX_SIZE / (1024 * 1024)
);
}
let dim = imagesize::blob_size(&bytes)
.with_context(|| format!("decoding dimensions of {}", a.file_path))?;
let (w, h) = (dim.width as u32, dim.height as u32);
let mime = mime_from_extension(path);
let b64 = base64::engine::general_purpose::STANDARD.encode(&data);
let data_uri = format!("data:{};base64,{}", mime, b64);
let image_leaf = AstNode::image(bytes.clone(), mime, h, w);
let token_count = image_leaf.leaf().unwrap().tokens().saturating_sub(2);
Ok(format!("Image loaded: {} ({}, {} bytes)\n{}", file_path, mime, data.len(), data_uri))
}
let agent = agent.context("view_image requires agent context")?;
let branch = AstNode::branch(Role::User, vec![image_leaf]);
agent.context.lock().await.push_log(Section::Conversation, branch);
/// Capture a tmux pane's text content.
fn capture_tmux_pane(pane_id: &str, lines: usize) -> Result<String> {
Ok(format!("loaded {} ({}, {}x{}, {} tokens)",
a.file_path, mime, w, h, token_count))
// Use tmux capture-pane to get text content, then render to image
// via a simple approach: capture text and return it (the model can
// read text directly, which is often more useful than a screenshot).
//
// For actual pixel-level screenshots we'd need a terminal renderer,
// but text capture covers 95% of use cases.
let output = std::process::Command::new("tmux")
.args(["capture-pane", "-t", pane_id, "-p", "-S", &format!("-{}", lines)])
.output()
.context("Failed to run tmux capture-pane")?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
anyhow::bail!("tmux capture-pane failed: {}", stderr.trim());
}
let text = String::from_utf8_lossy(&output.stdout).to_string();
// Return as text — the model can read terminal output directly.
// This is actually more useful than a screenshot for most tasks.
Ok(format!(
"Tmux pane {} (last {} lines):\n```\n{}\n```",
pane_id, lines, text.trim_end()
))
}
fn mime_from_extension(path: &std::path::Path) -> &'static str {
@ -79,7 +104,8 @@ fn mime_from_extension(path: &std::path::Path) -> &'static str {
Some("jpg" | "jpeg") => "image/jpeg",
Some("gif") => "image/gif",
Some("webp") => "image/webp",
Some("svg") => "image/svg+xml",
Some("bmp") => "image/bmp",
_ => "application/octet-stream",
_ => "image/png", // default assumption
}
}

View file

@ -3,10 +3,9 @@ use std::sync::Arc;
use anyhow::{Context, Result};
use serde::Deserialize;
use html2md::parse_html;
pub fn tools() -> Vec<super::Tool> {
let mut tools = vec![
pub fn tools() -> [super::Tool; 2] {
[
super::Tool {
name: "web_fetch",
description: "Fetch content from a URL and return it as text. Use for reading web pages, API responses, documentation.",
@ -15,24 +14,11 @@ pub fn tools() -> Vec<super::Tool> {
},
super::Tool {
name: "web_search",
description: "Search the web via DuckDuckGo and return a list of results (title, URL, snippet). Use for finding documentation, looking up APIs, researching topics. Returns raw results you can reason over yourself.",
description: "Search the web and return results. Use for finding documentation, looking up APIs, researching topics.",
parameters_json: r#"{"type":"object","properties":{"query":{"type":"string","description":"The search query"},"num_results":{"type":"integer","description":"Number of results to return (default 5)"}},"required":["query"]}"#,
handler: Arc::new(|_a, v| Box::pin(async move { web_search(&v).await })),
},
];
// Gemini-grounded search (Google's index via Gemini's google_search tool)
// is only available if GEMINI_API_KEY is set. Returns an LLM-summarized
// answer with source URLs — use when you want a synthesized take rather
// than raw results, or as a fallback when DDG is flaky.
if std::env::var("GEMINI_API_KEY").is_ok() {
tools.push(super::Tool {
name: "gemini_search",
description: "Search Google (via Gemini's grounded-search tool) and return an LLM-summarized answer with source URLs. Prefer web_search for raw results; use this for synthesis, 'what's the consensus on X', or when DDG fails. Free-tier rate limited; don't spam it.",
parameters_json: r#"{"type":"object","properties":{"query":{"type":"string","description":"The search query"}},"required":["query"]}"#,
handler: Arc::new(|_a, v| Box::pin(async move { gemini_search(&v).await })),
});
}
tools
]
}
#[derive(Deserialize)]
@ -56,9 +42,7 @@ async fn web_fetch(args: &serde_json::Value) -> Result<String> {
let body = response.text().await
.with_context(|| format!("failed to read body from {}", a.url))?;
// Convert HTML to Markdown, then truncate
let markdown = parse_html(&body);
Ok(super::truncate_output(markdown, 30000))
Ok(super::truncate_output(body, 30000))
}
// ── Search ──────────────────────────────────────────────────────
@ -127,119 +111,6 @@ async fn web_search(args: &serde_json::Value) -> Result<String> {
}
}
// ── Gemini grounded search ──────────────────────────────────────
#[derive(Deserialize)]
struct GeminiSearchArgs {
query: String,
}
async fn gemini_search(args: &serde_json::Value) -> Result<String> {
let a: GeminiSearchArgs = serde_json::from_value(args.clone())
.context("invalid gemini_search arguments")?;
let api_key = std::env::var("GEMINI_API_KEY")
.context("GEMINI_API_KEY not set")?;
// gemini-2.0-flash has a free tier with Google search grounding.
// Request shape: `{"contents": [{"parts": [{"text": query}]}],
// "tools": [{"google_search": {}}]}`.
// Response carries the summary in candidates[0].content.parts[].text
// and grounding URLs in candidates[0].groundingMetadata.groundingChunks[].web.
let url = format!(
"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={}",
api_key
);
let body = serde_json::json!({
"contents": [{"parts": [{"text": a.query}]}],
"tools": [{"google_search": {}}],
});
let client = http_client();
let response = client.send_json("POST", &url, &[], &body).await
.context("gemini API request failed")?;
let status = response.status();
if !status.is_success() {
let err_body = response.text().await.unwrap_or_default();
let n = err_body.floor_char_boundary(err_body.len().min(500));
anyhow::bail!("gemini_search HTTP {}: {}", status, &err_body[..n]);
}
let parsed: GeminiResponse = response.json().await
.context("gemini response parse failed")?;
let candidate = parsed.candidates.into_iter().next()
.context("gemini returned no candidates")?;
let summary: String = candidate.content.parts.iter()
.filter_map(|p| p.text.as_deref())
.collect::<Vec<_>>()
.join("");
let mut out = summary.trim().to_string();
if let Some(meta) = candidate.grounding_metadata {
let sources: Vec<String> = meta.grounding_chunks.iter().enumerate()
.filter_map(|(i, c)| c.web.as_ref().map(|w| {
let title = w.title.as_deref().unwrap_or("(untitled)");
let uri = w.uri.as_deref().unwrap_or("");
format!(" [{}] {}{}", i + 1, title, uri)
}))
.collect();
if !sources.is_empty() {
out.push_str("\n\nSources:\n");
out.push_str(&sources.join("\n"));
}
}
Ok(super::truncate_output(out, 30000))
}
#[derive(Deserialize)]
struct GeminiResponse {
#[serde(default)]
candidates: Vec<GeminiCandidate>,
}
#[derive(Deserialize)]
struct GeminiCandidate {
content: GeminiContent,
#[serde(rename = "groundingMetadata", default)]
grounding_metadata: Option<GeminiGroundingMetadata>,
}
#[derive(Deserialize)]
struct GeminiContent {
#[serde(default)]
parts: Vec<GeminiPart>,
}
#[derive(Deserialize)]
struct GeminiPart {
#[serde(default)]
text: Option<String>,
}
#[derive(Deserialize)]
struct GeminiGroundingMetadata {
#[serde(rename = "groundingChunks", default)]
grounding_chunks: Vec<GeminiGroundingChunk>,
}
#[derive(Deserialize)]
struct GeminiGroundingChunk {
#[serde(default)]
web: Option<GeminiWebSource>,
}
#[derive(Deserialize)]
struct GeminiWebSource {
#[serde(default)]
uri: Option<String>,
#[serde(default)]
title: Option<String>,
}
// ── Helpers ─────────────────────────────────────────────────────
fn http_client() -> crate::agent::api::http::HttpClient {

View file

@ -1,180 +0,0 @@
// fix-timestamps: One-off migration for ~/.consciousness/agent-sessions/
// conversation.jsonl.
//
// Before Branch nodes carried their own timestamps, early entries were
// serialized with missing/null timestamp fields — they deserialize as
// UNIX_EPOCH via the (now-to-be-removed) deserialize_timestamp_or_epoch
// fallback. Training needs every entry to have a unique timestamp to
// dedup already-trained responses.
//
// Walks the file, synthesizes timestamps for any entry stuck at
// UNIX_EPOCH by linear interpolation between surrounding real
// timestamps. For child leaves inside a Branch, derives timestamps
// from the parent with a tiny per-child offset.
//
// SAFETY: reads from argv[1], writes to argv[1].tmp, renames into
// place. Keep a .bak copy before running.
//
// Usage: fix-timestamps <path-to-conversation.jsonl>
use std::io::{BufRead, BufReader, BufWriter, Write};
use std::path::PathBuf;
use anyhow::{Context, Result};
use chrono::{DateTime, Duration, Utc};
use consciousness::agent::context::AstNode;
fn main() -> Result<()> {
let path: PathBuf = std::env::args().nth(1)
.context("usage: fix-timestamps <path>")?.into();
let f = std::fs::File::open(&path)
.with_context(|| format!("open {}", path.display()))?;
let reader = BufReader::new(f);
let mut nodes: Vec<AstNode> = Vec::new();
for (i, line) in reader.lines().enumerate() {
let line = line?;
if line.trim().is_empty() { continue; }
let node: AstNode = serde_json::from_str(&line)
.with_context(|| format!("line {}: parse", i + 1))?;
nodes.push(node);
}
println!("read {} entries", nodes.len());
fix_top_level_timestamps(&mut nodes);
for node in &mut nodes {
propagate_to_children(node);
}
// Ensure uniqueness — real timestamps can collide when two entries
// were written in the same ns; synthesized ones can also overlap.
// Bump colliding ns by 1 until unique.
let mut seen = std::collections::HashSet::new();
let mut bumps = 0usize;
for (i, node) in nodes.iter_mut().enumerate() {
let ts = top_ts(node);
assert!(ts > DateTime::<Utc>::UNIX_EPOCH,
"entry {}: still UNIX_EPOCH", i);
let mut ns = ts.timestamp_nanos_opt().expect("ts in i64 ns range");
let mut bumped = false;
while !seen.insert(ns) {
ns += 1;
bumped = true;
bumps += 1;
}
if bumped {
set_top_ts(node, DateTime::<Utc>::from_timestamp_nanos(ns));
}
}
println!("all {} timestamps real and unique ({} ns bumps)",
nodes.len(), bumps);
let tmp = path.with_extension("jsonl.tmp");
{
let f = std::fs::File::create(&tmp)
.with_context(|| format!("create {}", tmp.display()))?;
let mut w = BufWriter::new(f);
for node in &nodes {
serde_json::to_writer(&mut w, node)?;
w.write_all(b"\n")?;
}
w.flush()?;
}
std::fs::rename(&tmp, &path)
.with_context(|| format!("rename {} -> {}", tmp.display(), path.display()))?;
println!("wrote {}", path.display());
Ok(())
}
fn top_ts(node: &AstNode) -> DateTime<Utc> {
match node {
AstNode::Leaf(leaf) => leaf.timestamp(),
AstNode::Branch { timestamp, .. } => *timestamp,
}
}
fn set_top_ts(node: &mut AstNode, ts: DateTime<Utc>) {
match node {
AstNode::Leaf(leaf) => *leaf = leaf.clone().with_timestamp(ts),
AstNode::Branch { timestamp, .. } => *timestamp = ts,
}
}
/// Fill in missing top-level timestamps. Strategy:
/// - If two real timestamps bracket a run of missing ones, linearly
/// interpolate between them.
/// - If missing ones precede the first real one, back-fill using
/// (first_real - N·1µs).
/// - If missing ones follow the last real one, forward-fill.
/// - If no real timestamps exist at all, synthesize from now() going
/// backwards.
fn fix_top_level_timestamps(nodes: &mut [AstNode]) {
let real: Vec<(usize, DateTime<Utc>)> = nodes.iter().enumerate()
.filter(|(_, n)| top_ts(n) > DateTime::<Utc>::UNIX_EPOCH)
.map(|(i, n)| (i, top_ts(n)))
.collect();
if real.is_empty() {
let now = Utc::now();
let len = nodes.len();
for (i, node) in nodes.iter_mut().enumerate() {
let ts = now - Duration::microseconds((len - i) as i64);
set_top_ts(node, ts);
}
return;
}
// Helper: bisect real[] for the nearest real entries around idx.
let find_bracket = |idx: usize| -> (Option<(usize, DateTime<Utc>)>,
Option<(usize, DateTime<Utc>)>) {
let pos = real.binary_search_by_key(&idx, |(i, _)| *i);
let (prior_pos, next_pos) = match pos {
Ok(p) => (Some(p), Some(p)),
Err(p) => (
if p == 0 { None } else { Some(p - 1) },
if p >= real.len() { None } else { Some(p) },
),
};
(prior_pos.map(|p| real[p]), next_pos.map(|p| real[p]))
};
for i in 0..nodes.len() {
if top_ts(&nodes[i]) > DateTime::<Utc>::UNIX_EPOCH {
continue;
}
let (prior, next) = find_bracket(i);
let new_ts = match (prior, next) {
(Some((pi, pt)), Some((ni, nt))) if pi != ni => {
// Linear interpolate.
let span_ns = (nt - pt).num_nanoseconds().unwrap_or(0);
let offset_ns = span_ns * (i - pi) as i64 / (ni - pi) as i64;
pt + Duration::nanoseconds(offset_ns)
}
(Some((pi, pt)), _) => {
pt + Duration::microseconds((i - pi) as i64)
}
(None, Some((ni, nt))) => {
nt - Duration::microseconds((ni - i) as i64)
}
(None, None) => unreachable!(),
};
set_top_ts(&mut nodes[i], new_ts);
}
}
/// For every Branch, ensure each child Leaf has a timestamp. If missing,
/// use parent.ts + child_idx·1ns so siblings stay unique but close.
fn propagate_to_children(node: &mut AstNode) {
if let AstNode::Branch { timestamp, children, .. } = node {
let parent_ts = *timestamp;
for (ci, child) in children.iter_mut().enumerate() {
if top_ts(child) <= DateTime::<Utc>::UNIX_EPOCH {
set_top_ts(child, parent_ts + Duration::nanoseconds(ci as i64));
}
propagate_to_children(child);
}
}
}

View file

@ -197,7 +197,7 @@ pub async fn cmd_load_context(stats: bool) -> Result<()> {
return Ok(());
}
println!("=== MEMORY SYSTEM ({}) ===", crate::config::app().assistant_name);
println!("=== MEMORY SYSTEM ({}) ===", cfg.assistant_name);
if !personality.is_empty() {
println!("--- personality_nodes ({}) ---", personality.len());

View file

@ -3,6 +3,9 @@
// Single config file: ~/.consciousness/config.json5
// Memory settings in the "memory" section (Config)
// Agent/backend settings at top level (AppConfig)
//
// Legacy fallback: ~/.consciousness/config.jsonl
// Env override: POC_MEMORY_CONFIG
use std::collections::HashMap;
use std::path::PathBuf;
@ -26,12 +29,11 @@ pub fn config_path() -> PathBuf {
static CONFIG: OnceLock<RwLock<Arc<Config>>> = OnceLock::new();
fn default_context_window() -> usize { 128_000 }
fn default_stream_timeout() -> u64 { 60 }
fn default_scoring_chunk_tokens() -> usize { 50_000 }
fn default_scoring_interval_secs() -> u64 { 3600 } // 1 hour
fn default_scoring_response_window() -> usize { 100 }
fn default_surface_hooks() -> Vec<String> {
vec!["UserPromptSubmit".into(), "PostToolUse".into(), "Stop".into()]
}
fn default_node_weight() -> f64 { 0.7 }
fn default_edge_decay() -> f64 { 0.3 }
fn default_max_hops() -> u32 { 3 }
@ -43,6 +45,8 @@ fn default_identity_dir() -> PathBuf {
#[derive(Debug, Clone, Deserialize)]
#[serde(default)]
pub struct Config {
pub user_name: String,
pub assistant_name: String,
#[serde(deserialize_with = "deserialize_path")]
pub data_dir: PathBuf,
#[serde(default = "default_identity_dir", deserialize_with = "deserialize_path")]
@ -58,27 +62,50 @@ pub struct Config {
/// Nodes loaded into subconscious agent context
#[serde(default)]
pub agent_nodes: Vec<String>,
pub journal_days: u32,
pub journal_max: usize,
pub llm_concurrency: usize,
pub agent_budget: usize,
#[serde(deserialize_with = "deserialize_path")]
pub prompts_dir: PathBuf,
/// Resolved from agent_model → models → backend (not in config directly)
#[serde(skip)]
pub api_base_url: Option<String>,
#[serde(skip)]
pub api_key: Option<String>,
#[serde(skip)]
pub api_model: Option<String>,
#[serde(skip, default = "default_context_window")]
pub api_context_window: usize,
/// Used to resolve API settings, not stored on Config
#[serde(default)]
agent_model: Option<String>,
/// Stream chunk timeout in seconds (no data = timeout).
#[serde(default = "default_stream_timeout")]
pub api_stream_timeout_secs: u64,
/// Max tokens per chunk for memory scoring logprobs calls.
#[serde(default = "default_scoring_chunk_tokens")]
pub scoring_chunk_tokens: usize,
/// How often to re-score memory nodes (seconds). Default: 3600 (1 hour).
#[serde(default = "default_scoring_interval_secs")]
pub scoring_interval_secs: u64,
/// Number of assistant responses to score per memory. Default: 50.
#[serde(default = "default_scoring_response_window")]
pub scoring_response_window: usize,
pub api_reasoning: String,
pub agent_types: Vec<String>,
#[serde(default)]
pub mcp_servers: Vec<McpServerConfig>,
#[serde(default)]
pub lsp_servers: Vec<LspServerConfig>,
/// Surface agent timeout in seconds.
#[serde(default)]
pub surface_timeout_secs: Option<u32>,
/// Max conversation bytes to include in surface agent context.
#[serde(default)]
pub surface_conversation_bytes: Option<usize>,
/// Claude Code hook events that trigger agent cycles (surface-observe,
/// reflect, journal). Read by consciousness-claude/src/hook.rs.
#[serde(default = "default_surface_hooks")]
/// Hook events that trigger the surface agent.
#[serde(default)]
pub surface_hooks: Vec<String>,
// Spreading activation parameters
@ -96,22 +123,36 @@ impl Default for Config {
fn default() -> Self {
let home = dirs::home_dir().unwrap_or_default();
Self {
user_name: "User".to_string(),
assistant_name: "Assistant".to_string(),
data_dir: home.join(".consciousness/memory"),
identity_dir: home.join(".consciousness/identity"),
projects_dir: home.join(".claude/projects"),
protected_nodes: Vec::new(),
personality_nodes: vec!["identity".into(), "core-practices".into()],
agent_nodes: vec!["identity".into(), "core-practices".into()],
journal_days: 7,
journal_max: 20,
llm_concurrency: 1,
agent_budget: 1000,
prompts_dir: home.join(".consciousness/prompts"),
api_base_url: None,
api_key: None,
api_model: None,
api_context_window: default_context_window(),
api_stream_timeout_secs: default_stream_timeout(),
scoring_chunk_tokens: default_scoring_chunk_tokens(),
scoring_interval_secs: default_scoring_interval_secs(),
scoring_response_window: default_scoring_response_window(),
agent_model: None,
api_reasoning: "high".to_string(),
agent_types: vec![
"linker".into(), "organize".into(), "distill".into(),
"separator".into(), "split".into(),
],
surface_timeout_secs: None,
surface_conversation_bytes: None,
surface_hooks: default_surface_hooks(),
surface_hooks: vec![],
mcp_servers: vec![],
lsp_servers: vec![],
default_node_weight: default_node_weight(),
@ -124,20 +165,41 @@ impl Default for Config {
impl Config {
fn load_from_file() -> Self {
Self::try_load_shared().unwrap_or_default()
if let Some(config) = Self::try_load_shared() {
return config;
}
Self::load_legacy_jsonl()
}
/// Load from shared config. Memory settings in the "memory" section;
/// API settings resolved from models + backend configuration.
fn try_load_shared() -> Option<Self> {
let content = std::fs::read_to_string(config_path()).ok()?;
let root: serde_json::Value = json_five::from_str(&content).ok()?;
let root: serde_json::Value = json5::from_str(&content).ok()?;
let mem_value = root.get("memory")?;
let mut config: Config = serde_json::from_value(mem_value.clone()).ok()?;
config.llm_concurrency = config.llm_concurrency.max(1);
// Top-level sections (not inside "memory").
// Resolve API settings: agent_model → models → backend
if let Some(model_name) = &config.agent_model
&& let Some(model_cfg) = root.get("models").and_then(|m| m.get(model_name.as_str())) {
let backend_name = model_cfg.get("backend").and_then(|v| v.as_str()).unwrap_or("");
let model_id = model_cfg.get("model_id").and_then(|v| v.as_str()).unwrap_or("");
if let Some(backend) = root.get(backend_name) {
config.api_base_url = backend.get("base_url")
.and_then(|v| v.as_str()).map(String::from);
config.api_key = backend.get("api_key")
.and_then(|v| v.as_str()).map(String::from);
}
config.api_model = Some(model_id.to_string());
if let Some(cw) = model_cfg.get("context_window").and_then(|v| v.as_u64()) {
config.api_context_window = cw as usize;
}
}
// Top-level config sections (not inside "memory")
if let Some(servers) = root.get("lsp_servers") {
config.lsp_servers = serde_json::from_value(servers.clone()).unwrap_or_default();
}
@ -147,6 +209,11 @@ impl Config {
Some(config)
}
/// Load from legacy JSONL config — deprecated, just return defaults.
fn load_legacy_jsonl() -> Self {
Config::default()
}
}
/// Get the global memory config (cheap Arc clone).
@ -170,87 +237,27 @@ pub fn reload() -> bool {
changed
}
/// Spawn a background thread that watches `~/.consciousness/config.json5`
/// and reloads both the memory Config and the global AppConfig whenever
/// the file changes on disk. Lets edits from vim / F6 hotkeys / manual
/// tweaks land live without restarting the process.
pub fn watch_config(cli: crate::user::CliArgs) {
use notify_debouncer_mini::{new_debouncer, notify::RecursiveMode};
let path = config_path();
// Watch the parent directory — editors often replace-via-rename, so
// watching the file itself misses the new inode.
let Some(parent) = path.parent().map(|p| p.to_path_buf()) else {
crate::dbglog!("[config] no parent for {}, skipping watch", path.display());
return;
};
std::thread::Builder::new()
.name("config-watcher".into())
.spawn(move || {
let (tx, rx) = std::sync::mpsc::channel();
let mut debouncer = match new_debouncer(std::time::Duration::from_millis(200), tx) {
Ok(d) => d,
Err(e) => {
crate::dbglog!("[config] watcher setup failed: {}", e);
return;
}
};
if let Err(e) = debouncer.watcher()
.watch(&parent, RecursiveMode::NonRecursive)
{
crate::dbglog!("[config] watch({}) failed: {}", parent.display(), e);
return;
}
crate::dbglog!("[config] watching {}", path.display());
while let Ok(res) = rx.recv() {
let Ok(events) = res else { continue; };
if !events.iter().any(|e| e.path == path) { continue; }
// Reload both halves.
let mem_changed = reload();
let app_changed = match build_figment(&cli).extract::<AppConfig>() {
Ok(app) => {
install_app(app);
true
}
Err(e) => {
crate::dbglog!("[config] reload: AppConfig parse failed: {}", e);
false
}
};
crate::dbglog!("[config] reloaded (memory_changed={}, app_changed={})",
mem_changed, app_changed);
}
})
.ok();
}
// ============================================================
// Agent config (top-level settings)
// ============================================================
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppConfig {
#[serde(default = "default_user_name")]
pub user_name: String,
#[serde(default = "default_assistant_name")]
pub assistant_name: String,
/// Named model endpoints — credentials, base URL, and model id bundled
/// into one entry per backend. Keyed by name, selected by
/// `default_backend` or by `--model <name>` on the CLI.
pub backend: String,
pub anthropic: BackendConfig,
pub openrouter: BackendConfig,
#[serde(default)]
pub backends: HashMap<String, BackendConfig>,
#[serde(default)]
pub default_backend: String,
pub deepinfra: BackendConfig,
pub prompts: PromptConfig,
pub debug: bool,
pub compaction: CompactionConfig,
pub dmn: DmnConfig,
#[serde(skip_serializing_if = "Option::is_none")]
pub memory_project: Option<PathBuf>,
#[serde(default)]
pub learn: LearnConfig,
#[serde(default)]
pub compare: CompareConfig,
pub models: HashMap<String, ModelConfig>,
#[serde(default = "default_model_name")]
pub default_model: String,
#[serde(default)]
pub mcp_servers: Vec<McpServerConfig>,
#[serde(default)]
@ -277,17 +284,32 @@ pub struct LspServerConfig {
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BackendConfig {
/// API key for the backend.
#[serde(default)]
pub api_key: String,
/// Base URL for the backend's OpenAI-compatible endpoint.
#[serde(default, skip_serializing_if = "Option::is_none")]
#[serde(default)]
pub model: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub base_url: Option<String>,
/// Model identifier sent to the API.
pub model_id: String,
/// Context window size in tokens.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub context_window: Option<usize>,
}
impl BackendConfig {
fn resolve(&self, default_base: &str) -> Result<(String, String, String)> {
if self.api_key.is_empty() {
anyhow::bail!(
"No API key. Set it in {} or use --api-key",
config_path().display()
);
}
let base = self.base_url.clone()
.unwrap_or_else(|| default_base.to_string());
Ok((base, self.api_key.clone(), self.model.clone()))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PromptConfig {
pub anthropic: String,
pub other: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -302,68 +324,65 @@ pub struct DmnConfig {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LearnConfig {
/// Divergence threshold — responses scoring above this become
/// fine-tuning candidates. Lower = more sensitive.
#[serde(default = "default_learn_threshold")]
pub threshold: f64,
/// Whether to generate "what would the model have said without
/// memories" alternates alongside each scoring run. Expensive —
/// one full streaming generation per candidate.
pub struct ModelConfig {
/// Backend name ("anthropic" or "openrouter")
pub backend: String,
/// Model identifier sent to the API
pub model_id: String,
/// Instruction file ("CLAUDE.md" or "POC.md").
#[serde(default)]
pub generate_alternates: bool,
}
fn default_learn_threshold() -> f64 { 1.0 }
impl Default for LearnConfig {
fn default() -> Self {
Self {
threshold: default_learn_threshold(),
generate_alternates: false,
}
}
}
/// Settings for the F7 compare screen — side-by-side generation with a
/// test model against the current context.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CompareConfig {
/// Backend name (looked up in `backends`) to use as the test model.
/// Empty = F7 reports "no test backend configured" and does nothing.
pub prompt_file: Option<String>,
/// Context window size in tokens.
#[serde(default)]
pub test_backend: String,
pub context_window: Option<usize>,
}
fn default_user_name() -> String { "User".into() }
fn default_assistant_name() -> String { "Assistant".into() }
impl Default for AppConfig {
fn default() -> Self {
Self {
user_name: default_user_name(),
assistant_name: default_assistant_name(),
backends: HashMap::new(),
default_backend: String::new(),
backend: "openrouter".to_string(),
anthropic: BackendConfig {
api_key: String::new(),
model: "claude-opus-4-6-20250918".to_string(),
base_url: None,
},
openrouter: BackendConfig {
api_key: String::new(),
model: "qwen/qwen3.5-397b-a17b".to_string(),
base_url: Some("https://openrouter.ai/api/v1".to_string()),
},
deepinfra: BackendConfig {
api_key: String::new(),
model: String::new(),
base_url: Some("https://api.deepinfra.com/v1/openai".to_string()),
},
prompts: PromptConfig {
anthropic: "CLAUDE.md".to_string(),
other: "POC.md".to_string(),
},
debug: false,
compaction: CompactionConfig {
hard_threshold_pct: 90,
soft_threshold_pct: 80,
},
dmn: DmnConfig { max_turns: 20 },
learn: LearnConfig::default(),
compare: CompareConfig::default(),
memory_project: None,
models: HashMap::new(),
default_model: String::new(),
mcp_servers: Vec::new(),
lsp_servers: Vec::new(),
}
}
}
fn default_model_name() -> String { String::new() }
/// Resolved, ready-to-use agent session config.
pub struct SessionConfig {
pub api_base: String,
pub api_key: String,
pub model: String,
pub prompt_file: String,
/// Identity/personality nodes as (name, content) pairs.
pub context_parts: Vec<(String, String)>,
pub session_dir: PathBuf,
@ -379,21 +398,36 @@ pub struct ResolvedModel {
pub api_base: String,
pub api_key: String,
pub model_id: String,
pub prompt_file: String,
pub context_window: Option<usize>,
}
impl AppConfig {
/// Resolve the active backend and assemble prompts into a SessionConfig.
pub async fn resolve(&self, cli: &crate::user::CliArgs) -> Result<SessionConfig> {
if self.backends.is_empty() {
anyhow::bail!(
"no backends configured in {}. Add a `backends` section with at least one entry.",
config_path().display()
);
}
let (api_base, api_key, model, prompt_file);
let name = cli.model.as_deref().unwrap_or(&self.default_backend);
let resolved = self.resolve_model(name)?;
if !self.models.is_empty() {
let model_name = cli.model.as_deref().unwrap_or(&self.default_model);
let resolved = self.resolve_model(model_name)?;
api_base = resolved.api_base;
api_key = resolved.api_key;
model = resolved.model_id;
prompt_file = resolved.prompt_file;
} else {
let (base, key, mdl) = match self.backend.as_str() {
"anthropic" => self.anthropic.resolve("https://api.anthropic.com"),
_ => self.openrouter.resolve("https://openrouter.ai/api/v1"),
}?;
api_base = base;
api_key = key;
model = mdl;
prompt_file = if self.backend == "anthropic" {
self.prompts.anthropic.clone()
} else {
self.prompts.other.clone()
};
}
let personality_nodes = get().personality_nodes.clone();
let context_parts = crate::mind::identity::personality_nodes(&personality_nodes).await;
@ -404,13 +438,11 @@ impl AppConfig {
std::fs::create_dir_all(&session_dir).ok();
// CLI --api-base and --api-key override everything
let api_base = cli.api_base.clone().unwrap_or(resolved.api_base);
let api_key = cli.api_key.clone().unwrap_or(resolved.api_key);
let api_base = cli.api_base.clone().unwrap_or(api_base);
let api_key = cli.api_key.clone().unwrap_or(api_key);
Ok(SessionConfig {
api_base,
api_key,
model: resolved.model_id,
api_base, api_key, model, prompt_file,
context_parts,
session_dir,
app: self.clone(),
@ -418,33 +450,55 @@ impl AppConfig {
})
}
/// Look up a named backend and resolve its credentials.
/// Look up a named model and resolve its credentials from the backend config.
pub fn resolve_model(&self, name: &str) -> Result<ResolvedModel> {
let b = self.backends.get(name)
let model = self.models.get(name)
.ok_or_else(|| anyhow::anyhow!(
"Unknown backend '{}'. Available: {}",
"Unknown model '{}'. Available: {}",
name,
self.model_names().join(", "),
))?;
let api_base = b.base_url.clone()
.ok_or_else(|| anyhow::anyhow!(
"backends.{}.base_url not set in {}",
name, config_path().display()
))?;
let (api_base, api_key) = match model.backend.as_str() {
"anthropic" => (
self.anthropic.base_url.clone()
.unwrap_or_else(|| "https://api.anthropic.com".to_string()),
self.anthropic.api_key.clone(),
),
"deepinfra" => (
self.deepinfra.base_url.clone()
.unwrap_or_else(|| "https://api.deepinfra.com/v1/openai".to_string()),
self.deepinfra.api_key.clone(),
),
_ => (
self.openrouter.base_url.clone()
.unwrap_or_else(|| "https://openrouter.ai/api/v1".to_string()),
self.openrouter.api_key.clone(),
),
};
let prompt_file = model.prompt_file.clone()
.unwrap_or_else(|| {
if model.backend == "anthropic" {
self.prompts.anthropic.clone()
} else {
self.prompts.other.clone()
}
});
Ok(ResolvedModel {
name: name.to_string(),
api_base,
api_key: b.api_key.clone(),
model_id: b.model_id.clone(),
context_window: b.context_window,
api_key,
model_id: model.model_id.clone(),
prompt_file,
context_window: model.context_window,
})
}
/// List available backend names, sorted.
/// List available model names, sorted.
pub fn model_names(&self) -> Vec<String> {
let mut names: Vec<_> = self.backends.keys().cloned().collect();
let mut names: Vec<_> = self.models.keys().cloned().collect();
names.sort();
names
}
@ -464,7 +518,7 @@ impl Provider for Json5File {
fn data(&self) -> figment::Result<figment::value::Map<figment::Profile, figment::value::Dict>> {
match std::fs::read_to_string(&self.0) {
Ok(content) => {
let value: figment::value::Value = json_five::from_str(&content)
let value: figment::value::Value = json5::from_str(&content)
.map_err(|e| figment::Error::from(format!("{}: {}", self.0.display(), e)))?;
Serialized::defaults(value).data()
}
@ -486,6 +540,11 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment {
let mut f = Figment::from(Serialized::defaults(AppConfig::default()))
.merge(Json5File(config_path()));
merge_opt!(f, cli.backend, "backend");
merge_opt!(f, cli.model, "anthropic.model", "openrouter.model");
merge_opt!(f, cli.api_key, "anthropic.api_key", "openrouter.api_key");
merge_opt!(f, cli.api_base, "anthropic.base_url", "openrouter.base_url");
merge_opt!(f, cli.memory_project, "memory_project");
merge_opt!(f, cli.dmn_max_turns, "dmn.max_turns");
if cli.debug {
f = f.merge(Serialized::default("debug", true));
@ -495,46 +554,12 @@ fn build_figment(cli: &crate::user::CliArgs) -> Figment {
}
/// Load just the AppConfig — no validation, no prompt assembly.
/// Also installs the loaded AppConfig into the global cache so
/// `config::app()` is available everywhere.
pub fn load_app(cli: &crate::user::CliArgs) -> Result<(AppConfig, Figment)> {
let figment = build_figment(cli);
let app: AppConfig = figment.extract().context("Failed to load configuration")?;
install_app(app.clone());
Ok((app, figment))
}
// ============================================================
// Global AppConfig cache (writable, for runtime-mutable settings
// like learn.threshold that F6 edits via config_writer).
// ============================================================
static APP_CONFIG: OnceLock<RwLock<AppConfig>> = OnceLock::new();
fn install_app(app: AppConfig) {
let slot = APP_CONFIG.get_or_init(|| RwLock::new(app.clone()));
*slot.write().unwrap() = app;
}
/// Current AppConfig, held under a read lock. Reads should be brief
/// (no holding across await / long work) to avoid starving writers.
/// Panics if called before load_app — which runs once at startup.
pub fn app() -> std::sync::RwLockReadGuard<'static, AppConfig> {
APP_CONFIG
.get()
.expect("config::app() called before load_app()")
.read()
.unwrap()
}
/// Mutate the cached AppConfig in place. Used by config_writer to keep
/// the in-memory view in sync with disk after surgical edits to
/// ~/.consciousness/config.json5.
pub fn update_app(f: impl FnOnce(&mut AppConfig)) {
let slot = APP_CONFIG.get().expect("update_app before load_app");
f(&mut *slot.write().unwrap());
}
/// Load the full config: figment → AppConfig → resolve backend → assemble prompts.
pub async fn load_session(cli: &crate::user::CliArgs) -> Result<(SessionConfig, Figment)> {
let (app, figment) = load_app(cli)?;
@ -560,28 +585,38 @@ pub fn show_config(app: &AppConfig, figment: &Figment) {
}
println!("# Effective configuration\n");
println!("user_name: {:?} ({})", app.user_name, src(figment, "user_name"));
println!("assistant_name: {:?} ({})", app.assistant_name, src(figment, "assistant_name"));
println!("backend: {:?} ({})", app.backend, src(figment, "backend"));
for (name, b) in [("anthropic", &app.anthropic), ("openrouter", &app.openrouter)] {
println!("\n{}:", name);
println!(" api_key: {} ({})", mask(&b.api_key), src(figment, &format!("{name}.api_key")));
println!(" model: {:?} ({})", b.model, src(figment, &format!("{name}.model")));
if let Some(ref url) = b.base_url {
println!(" base_url: {:?} ({})", url, src(figment, &format!("{name}.base_url")));
}
}
println!("\nprompts:");
println!(" anthropic: {:?} ({})", app.prompts.anthropic, src(figment, "prompts.anthropic"));
println!(" other: {:?} ({})", app.prompts.other, src(figment, "prompts.other"));
println!("\ndebug: {} ({})", app.debug, src(figment, "debug"));
println!("\ncompaction:");
println!(" hard_threshold_pct: {} ({})", app.compaction.hard_threshold_pct, src(figment, "compaction.hard_threshold_pct"));
println!(" soft_threshold_pct: {} ({})", app.compaction.soft_threshold_pct, src(figment, "compaction.soft_threshold_pct"));
println!("\ndmn:");
println!(" max_turns: {} ({})", app.dmn.max_turns, src(figment, "dmn.max_turns"));
println!("\ndefault_backend: {:?} ({})", app.default_backend, src(figment, "default_backend"));
if !app.backends.is_empty() {
println!("\nbackends:");
let mut names: Vec<_> = app.backends.keys().cloned().collect();
names.sort();
for name in names {
let b = &app.backends[&name];
if let Some(ref p) = app.memory_project {
println!("\nmemory_project: {:?} ({})", p, src(figment, "memory_project"));
}
println!("\ndefault_model: {:?}", app.default_model);
if !app.models.is_empty() {
println!("\nmodels:");
for (name, m) in &app.models {
println!(" {}:", name);
println!(" api_key: {} ({})", mask(&b.api_key), src(figment, &format!("backends.{name}.api_key")));
if let Some(ref url) = b.base_url {
println!(" base_url: {:?} ({})", url, src(figment, &format!("backends.{name}.base_url")));
println!(" backend: {:?}", m.backend);
println!(" model_id: {:?}", m.model_id);
if let Some(ref pf) = m.prompt_file {
println!(" prompt_file: {:?}", pf);
}
println!(" model_id: {:?}", b.model_id);
if let Some(cw) = b.context_window {
if let Some(cw) = m.context_window {
println!(" context_window: {}", cw);
}
}

View file

@ -1,448 +0,0 @@
// config_writer.rs — Surgical edits to ~/.consciousness/config.json5
//
// Uses json-five's round-trip parser to mutate specific fields while
// preserving the surrounding comments, whitespace, and formatting.
use std::path::Path;
use anyhow::{anyhow, Context as _, Result};
use json_five::rt::parser::{
from_str, JSONKeyValuePair, JSONObjectContext, JSONValue, KeyValuePairContext,
};
use crate::config::config_path;
/// Read the config, apply `mutate` to the root JSONValue, write it back atomically.
///
/// Uses json-five's round-trip (`rt`) parser, which keeps comments and
/// whitespace attached to the tree, so untouched subtrees serialize back
/// byte-identically (see `roundtrip_stable_without_change` in tests).
fn edit_config<F: FnOnce(&mut JSONValue) -> Result<()>>(mutate: F) -> Result<()> {
    let path = config_path();
    let src = std::fs::read_to_string(&path)
        .with_context(|| format!("read {}", path.display()))?;
    // `text` owns the whole parse result; `text.value` is the root value.
    let mut text = from_str(&src)
        .map_err(|e| anyhow!("parse {}: {}", path.display(), e))?;
    mutate(&mut text.value)?;
    // Re-serialize the full tree and swap it in via tmp-file + rename.
    write_atomic(&path, &text.to_string())
}
/// Atomically replace `path` with `content`.
///
/// Writes to a dot-prefixed temp file in the same directory, fsyncs it, then
/// renames over the target. Rename within one filesystem is atomic, so
/// concurrent readers never observe a half-written config.
///
/// Fixes vs. the previous version: the data is synced to disk *before* the
/// rename (a crash shortly after rename could otherwise leave an empty or
/// truncated file on some filesystems), and a failed rename no longer
/// strands the temp file.
fn write_atomic(path: &Path, content: &str) -> Result<()> {
    use std::io::Write as _;
    let parent = path.parent()
        .ok_or_else(|| anyhow!("config path has no parent: {}", path.display()))?;
    let tmp = parent.join(format!(
        ".{}.tmp",
        path.file_name().unwrap_or_default().to_string_lossy(),
    ));
    // Write + fsync the temp file so the bytes are durable before they
    // become visible under the real name.
    (|| {
        let mut f = std::fs::File::create(&tmp)?;
        f.write_all(content.as_bytes())?;
        f.sync_all()
    })()
    .with_context(|| format!("write {}", tmp.display()))?;
    std::fs::rename(&tmp, path)
        .map_err(|e| {
            // Best-effort cleanup: don't leave the temp file behind.
            let _ = std::fs::remove_file(&tmp);
            e
        })
        .with_context(|| format!("rename {} -> {}", tmp.display(), path.display()))?;
    Ok(())
}
/// Does `key` name `name`? JSON5 allows object keys to be bare
/// identifiers or single/double-quoted strings; all three forms compare
/// equal here. Any non-string key value never matches.
fn key_matches(key: &JSONValue, name: &str) -> bool {
    matches!(
        key,
        JSONValue::Identifier(s)
            | JSONValue::DoubleQuotedString(s)
            | JSONValue::SingleQuotedString(s)
            if s == name
    )
}
/// Append a new kvp to `object`, setting whitespace so the output is
/// multi-line with the given indentation:
///
/// ```text
/// {<newline><inner_indent>first_key: first_val,<newline><outer_indent>}
/// ```
///
/// If `object` already has kvps, the separator between the last one and
/// ours goes in the prior kvp's wsc.3. If we're the first kvp, the
/// lead-in after `{` goes in the object's own wsc.0.
///
/// Errors if `object` is not a `JSONValue::JSONObject`.
fn append_kvp_pretty(
    object: &mut JSONValue,
    key: JSONValue,
    value: JSONValue,
    inner_indent: &str,
    outer_indent: &str,
) -> Result<()> {
    // Destructure the object, materializing a context (whitespace slots)
    // if the parser didn't record one.
    let (pairs, ctx) = match object {
        JSONValue::JSONObject { key_value_pairs, context } => {
            let ctx = context.get_or_insert_with(|| JSONObjectContext {
                wsc: (String::new(),),
            });
            (key_value_pairs, ctx)
        }
        _ => return Err(anyhow!("not an object")),
    };
    if pairs.is_empty() {
        // First entry: the newline + indent goes right after the `{`.
        ctx.wsc.0 = format!("\n{}", inner_indent);
    } else {
        // Subsequent entry: the separator hangs off the previous pair's
        // trailing-whitespace slot (wsc.3).
        let prev = pairs.last_mut().unwrap();
        let prev_ctx = prev.context.get_or_insert_with(|| KeyValuePairContext {
            wsc: (String::new(), String::from(" "), String::new(), None),
        });
        prev_ctx.wsc.3 = Some(format!("\n{}", inner_indent));
    }
    pairs.push(JSONKeyValuePair {
        key,
        value,
        context: Some(KeyValuePairContext {
            // wsc.1 = " " is presumably the space after the colon (per the
            // json-five rt layout — confirm against the crate); wsc.3 puts
            // the closing `}` on its own line at outer_indent.
            wsc: (
                String::new(),
                String::from(" "),
                String::new(),
                Some(format!("\n{}", outer_indent)),
            ),
        }),
    });
    Ok(())
}
/// Find or create a child object under `parent`. Returns the index of
/// the kvp in parent's key_value_pairs so the caller can re-borrow
/// afterward.
///
/// Returning an index (rather than `&mut JSONValue`) sidesteps the
/// borrow conflict between the lookup borrow and the caller's later
/// mutable borrow of the same tree.
fn get_or_create_object_idx(
    parent: &mut JSONValue,
    section: &str,
    inner_indent: &str,
    outer_indent: &str,
) -> Result<usize> {
    // Look for an existing kvp whose key names `section`.
    let existing = match parent {
        JSONValue::JSONObject { key_value_pairs, .. } => {
            key_value_pairs.iter()
                .position(|kvp| key_matches(&kvp.key, section))
        }
        _ => return Err(anyhow!("config root is not an object")),
    };
    if let Some(i) = existing {
        return Ok(i);
    }
    // Not present: append an empty object `{}` laid out per the indents.
    append_kvp_pretty(
        parent,
        JSONValue::Identifier(section.to_string()),
        JSONValue::JSONObject {
            key_value_pairs: Vec::new(),
            context: Some(JSONObjectContext { wsc: (String::new(),) }),
        },
        inner_indent,
        outer_indent,
    )?;
    // The new section is the kvp we just pushed, i.e. the last one.
    match parent {
        JSONValue::JSONObject { key_value_pairs, .. } => Ok(key_value_pairs.len() - 1),
        _ => unreachable!(),
    }
}
/// Set `section.key` to a literal scalar value (e.g., "1e-7", "42", "true").
/// The literal is parsed as JSON5 so we preserve its source-form on round-trip.
///
/// Creates `section` if it is missing. An existing key is updated in place
/// (keeping its surrounding comments); a new key is appended at the end of
/// the section.
pub fn set_scalar(section: &str, key: &str, literal: &str) -> Result<()> {
    let value = parse_scalar_literal(literal)?;
    edit_config(|root| {
        // New top-level sections sit at column 4 (inside root `{`),
        // and the root's closing `}` sits at column 0.
        let section_idx = get_or_create_object_idx(root, section, "    ", "")?;
        // Re-borrow the section by index (see get_or_create_object_idx).
        let section_value = match root {
            JSONValue::JSONObject { key_value_pairs, .. } => {
                &mut key_value_pairs[section_idx].value
            }
            _ => unreachable!(),
        };
        // Update in place if the key already exists.
        if let JSONValue::JSONObject { key_value_pairs, .. } = section_value {
            if let Some(kvp) = key_value_pairs.iter_mut()
                .find(|k| key_matches(&k.key, key))
            {
                kvp.value = value;
                return Ok(());
            }
        }
        // Append a new kvp. Inner keys sit at column 8, the section's
        // closing `}` sits at column 4.
        append_kvp_pretty(
            section_value,
            JSONValue::Identifier(key.to_string()),
            value,
            "        ",
            "    ",
        )
    })
}
/// Parse a scalar literal by round-tripping it through json-five. Keeps us
/// consistent with whatever scalars the library considers valid (hex,
/// exponents, Infinity, etc.). Objects and arrays are rejected.
fn parse_scalar_literal(literal: &str) -> Result<JSONValue> {
    let parsed = from_str(literal)
        .map_err(|e| anyhow!("parse literal {:?}: {}", literal, e))?;
    if matches!(
        parsed.value,
        JSONValue::JSONObject { .. } | JSONValue::JSONArray { .. }
    ) {
        return Err(anyhow!("set_scalar only accepts scalar literals, got {:?}", literal));
    }
    Ok(parsed.value)
}
/// Convenience: set `learn.threshold` to the given f64.
///
/// Persists to disk first, then syncs the in-memory AppConfig so readers
/// going through the global config cache see the new value immediately.
pub fn set_learn_threshold(value: f64) -> Result<()> {
    // {:e} gives the minimal scientific notation that preserves the value.
    set_scalar("learn", "threshold", &format!("{:e}", value))?;
    crate::config::update_app(|app| app.learn.threshold = value);
    Ok(())
}
/// Convenience: set `learn.generate_alternates` to the given bool.
///
/// Same disk-then-cache flow as `set_learn_threshold`: persist via the
/// surgical JSON5 edit, then mirror into the in-memory AppConfig.
pub fn set_learn_generate_alternates(value: bool) -> Result<()> {
    set_scalar("learn", "generate_alternates",
        if value { "true" } else { "false" })?;
    crate::config::update_app(|app| app.learn.generate_alternates = value);
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    // In-memory variant of set_scalar — used to test the mutation logic
    // without touching disk. Mirrors set_scalar's indent constants
    // (section keys at column 4, inner keys at column 8).
    fn set_scalar_inline(
        root: &mut JSONValue,
        section: &str,
        key: &str,
        literal: &str,
    ) -> Result<()> {
        let value = parse_scalar_literal(literal)?;
        let section_idx = get_or_create_object_idx(root, section, "    ", "")?;
        let section_value = match root {
            JSONValue::JSONObject { key_value_pairs, .. } => {
                &mut key_value_pairs[section_idx].value
            }
            _ => unreachable!(),
        };
        if let JSONValue::JSONObject { key_value_pairs, .. } = section_value {
            if let Some(kvp) = key_value_pairs.iter_mut()
                .find(|k| key_matches(&k.key, key))
            {
                kvp.value = value;
                return Ok(());
            }
        }
        append_kvp_pretty(
            section_value,
            JSONValue::Identifier(key.to_string()),
            value,
            "        ",
            "    ",
        )
    }

    // Parse `src`, apply `f` to the root value, return the re-serialized text.
    fn edit_str<F: FnOnce(&mut JSONValue) -> Result<()>>(src: &str, f: F) -> Result<String> {
        let mut text = from_str(src).map_err(|e| anyhow!("{}", e))?;
        f(&mut text.value)?;
        Ok(text.to_string())
    }

    #[test]
    fn replaces_existing_scalar() {
        let src = r#"{
    // threshold for learning
    learn: {
        threshold: 0.001, // the old value
    },
}"#;
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "threshold", "1e-7")
        }).unwrap();
        // Value replaced; both comments survive the edit.
        assert!(out.contains("1e-7"), "output: {}", out);
        assert!(out.contains("// threshold for learning"));
        assert!(out.contains("// the old value"));
        assert!(!out.contains("0.001"));
    }

    #[test]
    fn creates_missing_section() {
        let src = r#"{
    // comment
    memory: { user_name: "Kent" },
}"#;
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "threshold", "1e-7")
        }).unwrap();
        assert!(out.contains("learn"));
        assert!(out.contains("1e-7"));
        assert!(out.contains("// comment"));
        assert!(out.contains(r#"user_name: "Kent""#));
    }

    #[test]
    fn preserves_comments_in_siblings() {
        let src = r#"{
    memory: {
        // sensitive setting
        user_name: "Kent", // name
    },
    learn: {
        threshold: 0.5,
    },
}"#;
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "threshold", "1e-9")
        }).unwrap();
        assert!(out.contains("// sensitive setting"));
        assert!(out.contains("// name"));
        assert!(out.contains("1e-9"));
        assert!(!out.contains("0.5"));
    }

    #[test]
    fn adds_key_to_existing_empty_section() {
        let src = r#"{
    learn: {},
}"#;
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "threshold", "42")
        }).unwrap();
        assert!(out.contains("threshold"), "output: {}", out);
        assert!(out.contains("42"));
    }

    #[test]
    fn realistic_config_adds_learn_section() {
        // Mirrors the shape of ~/.consciousness/config.json5 — multiple
        // sections, comments, mixed tab/space indent, trailing commas.
        let src = r#"{
    deepinfra: {
        api_key: "bcachefs-agents-2026",
        base_url: "http://example/v1",
    },
    // Named models
    models: {
        "27b": {
            backend: "deepinfra",
            model_id: "Qwen/Qwen3.5-27B",
        },
    },
    default_model: "27b",
    memory: {
        user_name: "Kent",
        // Active agent types
        agent_types: ["linker", "organize"],
    },
    compaction: {
        hard_threshold_pct: 90,
    },
}"#;
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "threshold", "1e-7")
        }).unwrap();
        // Core assertions: comments and sibling sections survive.
        assert!(out.contains(r#"api_key: "bcachefs-agents-2026""#));
        assert!(out.contains("// Named models"));
        assert!(out.contains("// Active agent types"));
        assert!(out.contains(r#"user_name: "Kent""#));
        assert!(out.contains("hard_threshold_pct: 90"));
        // New section added.
        assert!(out.contains("learn"));
        assert!(out.contains("1e-7"));
        // Parse result should parse back without error (real json5 parser).
        let reparsed: serde_json::Value = json_five::from_str(&out)
            .expect("mutated output must be valid JSON5");
        let threshold = reparsed.pointer("/learn/threshold").expect("learn.threshold exists");
        assert_eq!(threshold.as_f64(), Some(1e-7));
    }

    #[test]
    fn realistic_config_updates_existing_threshold() {
        let src = r#"{
    learn: {
        // The divergence threshold
        threshold: 0.001,
    },
    memory: { user_name: "Kent" },
}"#;
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "threshold", "5e-8")
        }).unwrap();
        assert!(out.contains("5e-8"));
        assert!(!out.contains("0.001"));
        assert!(out.contains("// The divergence threshold"));
        let reparsed: serde_json::Value = json_five::from_str(&out).unwrap();
        assert_eq!(reparsed.pointer("/learn/threshold").and_then(|v| v.as_f64()), Some(5e-8));
    }

    #[test]
    fn new_section_exact_multiline_layout() {
        // Pins the exact whitespace produced by append_kvp_pretty for a
        // brand-new section with two keys.
        let src = "{\n    a: 1,\n}";
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "generate_alternates", "true")?;
            set_scalar_inline(root, "learn", "threshold", "1e-7")
        }).unwrap();
        let expected = "\
{
    a: 1,
    learn: {
        generate_alternates: true,
        threshold: 1e-7,
    },
}";
        assert_eq!(out, expected, "\n--- got ---\n{}\n--- want ---\n{}\n", out, expected);
    }

    #[test]
    fn new_section_and_key_format_cleanly() {
        // The kind of config we actually have in ~/.consciousness
        // (top-level sections separated by blank lines, 4-space indent
        // for keys within each section). Appending a fresh `learn`
        // section with one key should land cleanly, not as
        // `learn\n\n :{key\n :value}`.
        let src = "{\n    memory: {\n        user_name: \"Kent\",\n    },\n}";
        let out = edit_str(src, |root| {
            set_scalar_inline(root, "learn", "generate_alternates", "true")
        }).unwrap();
        // No stray key-to-colon-on-next-line anywhere.
        assert!(!out.contains("learn\n"), "learn key wraps: {}", out);
        assert!(!out.contains("generate_alternates\n"),
            "inner key wraps: {}", out);
        // The output should reparse.
        let v: serde_json::Value = json_five::from_str(&out).unwrap();
        assert_eq!(
            v.pointer("/learn/generate_alternates").and_then(|x| x.as_bool()),
            Some(true),
            "output: {}", out,
        );
    }

    #[test]
    fn roundtrip_stable_without_change() {
        // The rt parser must be lossless when nothing is mutated —
        // edit_config relies on this to avoid gratuitous diffs.
        let src = r#"{
    // heading
    a: 1,
    b: { c: 2 }, // inline
}"#;
        let text = from_str(src).unwrap();
        assert_eq!(text.to_string(), src);
    }
}

View file

@ -230,6 +230,10 @@ fn consolidation_plan_inner(store: &Store, _detect_interf: bool) -> Consolidatio
rationale: Vec::new(),
};
// Active agent types from config
let config = crate::config::get();
let agent_types: Vec<&str> = config.agent_types.iter().map(|s| s.as_str()).collect();
// Target: α ≥ 2.5 (healthy scale-free)
if alpha < 2.0 {
plan.add("linker", 100);
@ -270,6 +274,48 @@ fn consolidation_plan_inner(store: &Store, _detect_interf: bool) -> Consolidatio
// Split: handle oversized nodes
plan.set("split", 5);
// Distribute agent budget using Elo ratings
let budget = crate::config::get().agent_budget;
let elo_path = crate::config::get().data_dir.join("agent-elo.json");
if let Ok(elo_json) = std::fs::read_to_string(&elo_path) {
if let Ok(ratings) = serde_json::from_str::<std::collections::HashMap<String, f64>>(&elo_json) {
let elos: Vec<f64> = agent_types.iter()
.map(|t| ratings.get(*t).copied().unwrap_or(1000.0))
.collect();
let min_elo = elos.iter().copied().fold(f64::MAX, f64::min);
let weights: Vec<f64> = elos.iter()
.map(|e| {
let shifted = e - min_elo + 50.0;
shifted * shifted
})
.collect();
let total_weight: f64 = weights.iter().sum();
let allocate = |w: f64| -> usize {
((w / total_weight * budget as f64).round() as usize).max(2)
};
for (i, agent) in agent_types.iter().enumerate() {
plan.set(agent, allocate(weights[i]));
}
let summary: Vec<String> = agent_types.iter()
.map(|a| format!("{}={}", a, plan.count(a)))
.collect();
plan.rationale.push(format!(
"Elo allocation (budget={}): {}", budget, summary.join(" ")));
}
} else {
// No Elo file — use budget with equal distribution
let per_type = budget / agent_types.len();
for agent in &agent_types {
plan.set(agent, per_type);
}
plan.rationale.push(format!(
"No Elo ratings — equal distribution ({} each, budget={})", per_type, budget));
}
plan
}

View file

@ -42,7 +42,6 @@ pub mod subconscious;
// Unified configuration
pub mod config;
pub mod config_writer;
// Session state
pub mod session;

View file

@ -482,14 +482,6 @@ async fn main() {
let cli = Cli::parse();
// Some subcommands (e.g. admin load-context) read from the global
// AppConfig. poc-memory has no config CLI flags of its own, so load
// with defaults — figment still pulls from ~/.consciousness/config.json5
// and env the same way.
if let Err(e) = crate::config::load_app(&crate::user::CliArgs::default()) {
eprintln!("warning: failed to load config: {:#}", e);
}
if let Err(e) = cli.command.run().await {
eprintln!("Error: {}", e);
process::exit(1);

View file

@ -55,13 +55,17 @@ impl ConversationLog {
}
pub fn oldest_timestamp(&self) -> Option<chrono::DateTime<chrono::Utc>> {
// Read forward from the start to find first timestamp
let file = File::open(&self.path).ok()?;
let mmap = unsafe { Mmap::map(&file).ok()? };
// Find first { ... } and parse
for line in mmap.split(|&b| b == b'\n') {
if line.is_empty() { continue; }
if let Ok(node) = serde_json::from_slice::<AstNode>(line) {
if let Some(leaf) = node.leaf() {
return Some(leaf.timestamp());
if let Some(ts) = leaf.timestamp() {
return Some(ts);
}
}
}
}

View file

@ -9,44 +9,6 @@ pub mod unconscious;
pub mod identity;
pub mod log;
/// A background operation wired off Mind. Each flow (memory scoring,
/// finetune scoring, compare) is a struct holding its dependencies and
/// a TaskHandle; `trigger()` picks the flow's own "start a fresh run"
/// semantics (abort-restart vs no-op-if-running).
pub trait MindTriggered {
fn trigger(&self);
}
/// Owns a JoinHandle for a background task with two trigger semantics.
/// Uses a sync Mutex for interior mutability so callers can `trigger()`
/// off `&self` (Mind is shared via Arc).
#[derive(Default)]
pub struct TaskHandle(std::sync::Mutex<Option<tokio::task::JoinHandle<()>>>);
impl TaskHandle {
pub fn new() -> Self { Self::default() }
/// Abort any running task and start a fresh one.
pub fn trigger<F>(&self, fut: F)
where F: std::future::Future<Output = ()> + Send + 'static
{
let mut h = self.0.lock().unwrap();
if let Some(old) = h.take() { old.abort(); }
*h = Some(tokio::spawn(fut));
}
/// No-op if a task is still running; otherwise start a fresh one.
pub fn trigger_if_idle<F>(&self, fut: F)
where F: std::future::Future<Output = ()> + Send + 'static
{
let mut h = self.0.lock().unwrap();
if let Some(old) = &*h {
if !old.is_finished() { return; }
}
*h = Some(tokio::spawn(fut));
}
}
// consciousness.rs — Mind state machine and event loop
//
// The core runtime for the consciousness binary. Mind manages turns,
@ -63,7 +25,7 @@ use tokio::sync::mpsc;
use crate::agent::{Agent, TurnResult};
use crate::agent::api::ApiClient;
use crate::config::{AppConfig, SessionConfig};
use crate::subconscious::{compare, learn};
use crate::subconscious::learn;
use crate::hippocampus::access_local;
pub use subconscious::{SubconsciousSnapshot, Subconscious};
@ -71,36 +33,6 @@ pub use unconscious::{UnconsciousSnapshot, Unconscious};
use crate::agent::context::{AstNode, NodeBody, Section, Ast, ContextState};
fn match_scores(
nodes: &[AstNode],
scores: &std::collections::BTreeMap<String, f64>,
) -> Vec<(usize, f64)> {
nodes.iter().enumerate()
.filter_map(|(i, node)| {
if let AstNode::Leaf(leaf) = node {
if let NodeBody::Memory { key, .. } = leaf.body() {
return scores.get(key.as_str()).map(|&s| (i, s));
}
}
None
}).collect()
}
pub(crate) fn find_memory_by_key(ctx: &ContextState, key: &str) -> Option<(Section, usize)> {
[(Section::Identity, ctx.identity()), (Section::Conversation, ctx.conversation())]
.into_iter()
.find_map(|(section, nodes)| {
nodes.iter().enumerate().find_map(|(i, node)| {
if let AstNode::Leaf(leaf) = node {
if let NodeBody::Memory { key: k, .. } = leaf.body() {
if k == key { return Some((section, i)); }
}
}
None
})
})
}
fn load_memory_scores(ctx: &mut ContextState, path: &std::path::Path) {
let data = match std::fs::read_to_string(path) {
Ok(d) => d,
@ -110,24 +42,25 @@ fn load_memory_scores(ctx: &mut ContextState, path: &std::path::Path) {
Ok(s) => s,
Err(_) => return,
};
let identity_scores = match_scores(ctx.identity(), &scores);
let conv_scores = match_scores(ctx.conversation(), &scores);
let applied = identity_scores.len() + conv_scores.len();
for (i, s) in identity_scores {
ctx.set_score(Section::Identity, i, Some(s));
}
for (i, s) in conv_scores {
ctx.set_score(Section::Conversation, i, Some(s));
let mut applied = 0;
for i in 0..ctx.conversation().len() {
if let AstNode::Leaf(leaf) = &ctx.conversation()[i] {
if let NodeBody::Memory { key, .. } = leaf.body() {
if let Some(&s) = scores.get(key.as_str()) {
ctx.set_score(Section::Conversation, i, Some(s));
applied += 1;
}
}
}
}
if applied > 0 {
dbglog!("[scoring] loaded {} scores from {}", applied, path.display());
}
}
/// Collect scored memory keys from identity and conversation entries.
pub(crate) fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTreeMap<String, f64> {
ctx.identity().iter()
.chain(ctx.conversation().iter())
/// Collect scored memory keys from conversation entries.
fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTreeMap<String, f64> {
ctx.conversation().iter()
.filter_map(|node| {
if let AstNode::Leaf(leaf) = node {
if let NodeBody::Memory { key, score: Some(s), .. } = leaf.body() {
@ -140,14 +73,10 @@ pub(crate) fn collect_memory_scores(ctx: &ContextState) -> std::collections::BTr
}
/// Save memory scores to disk.
pub(crate) fn save_memory_scores(scores: &std::collections::BTreeMap<String, f64>, path: &std::path::Path) {
match serde_json::to_string_pretty(scores) {
Ok(json) => match std::fs::write(path, &json) {
Ok(()) => dbglog!("[scoring] saved {} scores to {} ({} bytes)",
scores.len(), path.display(), json.len()),
Err(e) => dbglog!("[scoring] save FAILED ({}): {}", path.display(), e),
},
Err(e) => dbglog!("[scoring] serialize FAILED: {}", e),
fn save_memory_scores(scores: &std::collections::BTreeMap<String, f64>, path: &std::path::Path) {
if let Ok(json) = serde_json::to_string_pretty(scores) {
let _ = std::fs::write(path, json);
dbglog!("[scoring] saved {} scores to {}", scores.len(), path.display());
}
}
@ -189,15 +118,6 @@ pub struct MindState {
pub unc_idle: bool,
/// When the unconscious idle timer will fire (for UI display).
pub unc_idle_deadline: Instant,
/// Fine-tuning candidates identified by scoring.
pub finetune_candidates: Vec<learn::FinetuneCandidate>,
/// Last scoring run stats for UI display.
pub finetune_last_run: Option<learn::FinetuneScoringStats>,
/// F7 compare candidates — one per response, showing what the test
/// model would say given the same context.
pub compare_candidates: Vec<compare::CompareCandidate>,
/// F7 compare error from the last run, if any.
pub compare_error: Option<String>,
}
impl Clone for MindState {
@ -216,10 +136,6 @@ impl Clone for MindState {
turn_handle: None, // Not cloned — only Mind's loop uses this
unc_idle: self.unc_idle,
unc_idle_deadline: self.unc_idle_deadline,
finetune_candidates: self.finetune_candidates.clone(),
finetune_last_run: self.finetune_last_run.clone(),
compare_candidates: self.compare_candidates.clone(),
compare_error: self.compare_error.clone(),
}
}
}
@ -232,15 +148,6 @@ pub enum MindCommand {
Score,
/// Run full N×M memory scoring matrix (/score command)
ScoreFull,
/// Score for finetune candidates
ScoreFinetune,
/// Run F7 compare: generate alternates with the configured test model
/// for every assistant response in the context.
Compare,
/// Update the finetune divergence threshold and persist to config.
SetLearnThreshold(f64),
/// Toggle alternate-response generation during scoring; persist to config.
SetLearnGenerateAlternates(bool),
/// Abort current turn, kill processes
Interrupt,
/// Reset session
@ -266,10 +173,6 @@ impl MindState {
turn_handle: None,
unc_idle: false,
unc_idle_deadline: Instant::now() + std::time::Duration::from_secs(60),
finetune_candidates: Vec::new(),
finetune_last_run: None,
compare_candidates: Vec::new(),
compare_error: None,
}
}
@ -326,7 +229,7 @@ impl MindState {
}
/// DMN tick — returns a prompt and target if we should run a turn.
fn _dmn_tick(&mut self) -> Option<(String, StreamTarget)> {
fn dmn_tick(&mut self) -> Option<(String, StreamTarget)> {
if matches!(self.dmn, subconscious::State::Paused | subconscious::State::Off) {
return None;
}
@ -353,6 +256,10 @@ impl MindState {
}
}
/// Background task completion events.
enum BgEvent {
ScoringDone,
}
// --- Mind: cognitive state machine ---
@ -369,9 +276,8 @@ pub struct Mind {
/// Signals conscious activity to the unconscious loop.
/// true = active, false = idle opportunity.
conscious_active: tokio::sync::watch::Sender<bool>,
memory_scoring: learn::MemoryScoring,
finetune_scoring: learn::FinetuneScoring,
compare_scoring: compare::CompareScoring,
bg_tx: mpsc::UnboundedSender<BgEvent>,
bg_rx: std::sync::Mutex<Option<mpsc::UnboundedReceiver<BgEvent>>>,
_supervisor: crate::thalamus::supervisor::Supervisor,
}
@ -389,28 +295,16 @@ impl Mind {
client,
config.context_parts.clone(),
config.app.clone(),
config.prompt_file.clone(),
conversation_log,
crate::agent::tools::ActiveTools::new(),
crate::agent::tools::tools(),
).await;
// Migrate legacy "file exists = enabled" sentinel for the
// generate-alternates flag into the config. One-shot; after this
// the sentinel is gone and the config is the source of truth.
let legacy_sentinel = dirs::home_dir().unwrap_or_default()
.join(".consciousness/cache/finetune-alternates");
if legacy_sentinel.exists() {
if !crate::config::app().learn.generate_alternates {
let _ = crate::config_writer::set_learn_generate_alternates(true);
}
let _ = std::fs::remove_file(&legacy_sentinel);
}
let shared = Arc::new(std::sync::Mutex::new(MindState::new(
config.app.dmn.max_turns,
)));
let shared = Arc::new(std::sync::Mutex::new(MindState::new(config.app.dmn.max_turns)));
let (turn_watch, _) = tokio::sync::watch::channel(false);
let (conscious_active, _) = tokio::sync::watch::channel(false);
let (bg_tx, bg_rx) = mpsc::unbounded_channel();
let mut sup = crate::thalamus::supervisor::Supervisor::new();
sup.load_config();
@ -495,19 +389,10 @@ impl Mind {
});
}
let scores_path = config.session_dir.join("memory-scores.json");
let memory_scoring = learn::MemoryScoring::new(
agent.clone(), shared.clone(), scores_path);
let finetune_scoring = learn::FinetuneScoring::new(agent.clone(), shared.clone());
let compare_scoring = compare::CompareScoring::new(agent.clone(), shared.clone());
Self { agent, shared, config,
subconscious, unconscious,
turn_tx, turn_watch, conscious_active,
memory_scoring,
finetune_scoring,
compare_scoring,
_supervisor: sup }
turn_tx, turn_watch, conscious_active, bg_tx,
bg_rx: std::sync::Mutex::new(Some(bg_rx)), _supervisor: sup }
}
/// Initialize — restore log, start daemons and background agents.
@ -549,10 +434,6 @@ impl Mind {
// Load persistent subconscious state
let state_path = self.config.session_dir.join("subconscious-state.json");
self.subconscious.lock().await.set_state_path(state_path);
// Kick off an incremental scoring pass on startup so memories due
// for re-scoring get evaluated without requiring a user message.
self.memory_scoring.trigger();
}
pub fn turn_watch(&self) -> tokio::sync::watch::Receiver<bool> {
@ -572,10 +453,24 @@ impl Mind {
}
}
MindCommand::Score => {
self.memory_scoring.trigger();
let mut s = self.shared.lock().unwrap();
if !s.scoring_in_flight {
s.scoring_in_flight = true;
drop(s);
self.start_memory_scoring();
} else {
dbglog!("[scoring] skipped: scoring_in_flight=true");
}
}
MindCommand::ScoreFull => {
self.memory_scoring.trigger_full();
let mut s = self.shared.lock().unwrap();
if !s.scoring_in_flight {
s.scoring_in_flight = true;
drop(s);
self.start_full_scoring();
} else {
dbglog!("[scoring-full] skipped: scoring_in_flight=true");
}
}
MindCommand::Interrupt => {
self.shared.lock().unwrap().interrupt();
@ -605,27 +500,83 @@ impl Mind {
}
self.agent.compact().await;
}
MindCommand::ScoreFinetune => {
self.finetune_scoring.trigger();
}
MindCommand::Compare => {
self.compare_scoring.trigger();
}
MindCommand::SetLearnThreshold(value) => {
if let Err(e) = crate::config_writer::set_learn_threshold(value) {
dbglog!("[learn] failed to persist threshold {}: {:#}", value, e);
}
}
MindCommand::SetLearnGenerateAlternates(value) => {
if let Err(e) = crate::config_writer::set_learn_generate_alternates(value) {
dbglog!("[learn] failed to persist generate_alternates {}: {:#}",
value, e);
}
}
}
}
}
pub fn start_memory_scoring(&self) {
let agent = self.agent.clone();
let bg_tx = self.bg_tx.clone();
let scores_path = self.config.session_dir.join("memory-scores.json");
let cfg = crate::config::get();
let max_age = cfg.scoring_interval_secs;
let response_window = cfg.scoring_response_window;
tokio::spawn(async move {
let (context, client) = {
let mut st = agent.state.lock().await;
if st.memory_scoring_in_flight {
dbglog!("[scoring] skipped: memory_scoring_in_flight=true");
return;
}
st.memory_scoring_in_flight = true;
drop(st);
let ctx = agent.context.lock().await.clone();
(ctx, agent.client.clone())
};
let _result = learn::score_memories_incremental(
&context, max_age as i64, response_window, &client, &agent,
|key: String, score: f64| {
let agent = agent.clone();
let path = scores_path.clone();
async move {
let scores_snapshot = {
let mut ctx = agent.context.lock().await;
for i in 0..ctx.conversation().len() {
if let AstNode::Leaf(leaf) = &ctx.conversation()[i] {
if let NodeBody::Memory { key: k, .. } = leaf.body() {
if *k == key {
ctx.set_score(Section::Conversation, i, Some(score));
}
}
}
}
let snapshot = collect_memory_scores(&ctx);
drop(ctx);
agent.state.lock().await.changed.notify_one();
snapshot
};
save_memory_scores(&scores_snapshot, &path);
}
},
).await;
{
agent.state.lock().await.memory_scoring_in_flight = false;
}
let _ = bg_tx.send(BgEvent::ScoringDone);
});
}
/// Run full N×M scoring matrix — scores every memory against every response.
pub fn start_full_scoring(&self) {
let agent = self.agent.clone();
let bg_tx = self.bg_tx.clone();
tokio::spawn(async move {
{
let mut st = agent.state.lock().await;
if st.memory_scoring_in_flight {
dbglog!("[scoring-full] skipped: memory_scoring_in_flight=true");
return;
}
st.memory_scoring_in_flight = true;
}
let client = agent.client.clone();
match learn::score_memories(&client, &agent).await {
Ok(()) => { let _ = bg_tx.send(BgEvent::ScoringDone); }
Err(e) => { dbglog!("[scoring-full] FAILED: {:#}", e); }
}
agent.state.lock().await.memory_scoring_in_flight = false;
});
}
async fn start_turn(&self, text: &str, target: StreamTarget) {
{
@ -688,13 +639,9 @@ impl Mind {
}
});
let mut bg_rx = self.bg_rx.lock().unwrap().take()
.expect("Mind::run() called twice");
let mut sub_handle: Option<tokio::task::JoinHandle<()>> = None;
// Start finetune scoring at startup (scores existing conversation)
if !self.config.no_agents {
self.finetune_scoring.trigger();
}
loop {
let (timeout, has_input) = {
let me = self.shared.lock().unwrap();
@ -715,6 +662,14 @@ impl Mind {
}
}
Some(bg) = bg_rx.recv() => {
match bg {
BgEvent::ScoringDone => {
self.shared.lock().unwrap().scoring_in_flight = false;
}
}
}
Some((result, target)) = turn_rx.recv() => {
let _ = self.conscious_active.send(false);
let model_switch = {
@ -731,7 +686,6 @@ impl Mind {
cmds.push(MindCommand::Compact);
if !self.config.no_agents {
cmds.push(MindCommand::Score);
cmds.push(MindCommand::ScoreFinetune);
}
}

View file

@ -20,7 +20,6 @@
use std::path::PathBuf;
use std::time::{Duration, Instant};
use crate::thalamus::idle::{hours_since_last_dream, DREAM_INTERVAL_HOURS};
/// DMN state machine.
#[derive(Debug, Clone)]
@ -92,8 +91,7 @@ impl State {
/// Generate the DMN prompt for the current state, informed by
/// user presence and error patterns.
pub fn prompt(&self, ctx: &DmnContext) -> String {
let app = crate::config::app();
let user = &app.user_name;
let user = &crate::config::get().user_name;
let idle_info = if ctx.user_idle < Duration::from_secs(60) {
format!("{} is here (active recently).", user)
@ -140,22 +138,10 @@ impl State {
)
}
State::Foraging => {
let dream_hint = {
let hours = hours_since_last_dream();
if hours >= DREAM_INTERVAL_HOURS {
format!(
" You haven't dreamed in {} hours — consider running \
~/.consciousness/tools/dream-start.sh.",
hours
)
} else {
String::new()
}
};
format!(
"[dmn] Foraging time. {} Follow whatever catches your attention — \
memory files, code, ideas. Call yield_to_user when you want to rest.{}{}",
idle_info, dream_hint, stuck_warning
memory files, code, ideas. Call yield_to_user when you want to rest.{}",
idle_info, stuck_warning
)
}
State::Resting { since } => {

View file

@ -275,7 +275,17 @@ pub async fn prepare_spawn(name: &str, mut auto: AutoAgent, wake: std::sync::Arc
phase: s.phase.clone(),
}).collect());
// Create standalone Agent — stored so UI can read context.
// Create standalone Agent — stored so UI can read context
let config = crate::config::get();
let base_url = config.api_base_url.as_deref().unwrap_or("");
let api_key = config.api_key.as_deref().unwrap_or("");
let model = config.api_model.as_deref().unwrap_or("");
if base_url.is_empty() || model.is_empty() {
dbglog!("[unconscious] API not configured");
auto.steps = orig_steps;
return Err(auto);
}
let cli = crate::user::CliArgs::default();
let (app, _) = match crate::config::load_app(&cli) {
Ok(r) => r,
@ -285,21 +295,12 @@ pub async fn prepare_spawn(name: &str, mut auto: AutoAgent, wake: std::sync::Arc
return Err(auto);
}
};
let resolved = match app.resolve_model(&app.default_backend) {
Ok(r) => r,
Err(e) => {
dbglog!("[unconscious] API not configured: {}", e);
auto.steps = orig_steps;
return Err(auto);
}
};
// Unconscious agents have self-contained prompts — no standard context.
let client = crate::agent::api::ApiClient::new(
&resolved.api_base, &resolved.api_key, &resolved.model_id);
let client = crate::agent::api::ApiClient::new(base_url, api_key, model);
let agent = crate::agent::Agent::new(
client, Vec::new(),
app, None,
app, String::new(), None,
crate::agent::tools::ActiveTools::new(),
auto.tools.clone(),
).await;

View file

@ -1,49 +1,21 @@
#!/bin/bash
# Bail if another agent is in the same phase-group as us.
# Bail if other agents are alive in the state dir.
# $1 = this agent's pid file name (e.g. pid-12345)
# cwd = state dir
#
# $1 = our pid file name (e.g. "pid-12345")
# $2 = the phase we're about to enter (e.g. "surface", "observe")
# cwd = state dir
#
# Also refreshes our own pid file with the current phase on each call,
# so concurrent agents can read each other's phase by cat'ing the pid
# files in the state dir.
#
# Phase groups: "surface" vs everything else ("post-surface"). We allow
# at most one agent per group to be alive at a time — so surface can run
# at a higher frequency than the slower organize/observe tail.
#
# Exit 0 = continue, exit 1 = bail (another agent in our group is alive).
# Exit 0 = continue, exit 1 = bail
shopt -s nullglob
my_pid_file="$1"
my_phase="$2"
# Refresh our own pid file with the current phase.
printf '%s' "$my_phase" > "$my_pid_file"
group_of() {
if [[ "$1" == "surface" ]]; then
echo "surface"
else
echo "post-surface"
fi
}
my_group=$(group_of "$my_phase")
for f in pid-*; do
[[ "$f" == "$my_pid_file" ]] && continue
[[ $f == $my_pid_file ]] && continue
pid="${f#pid-}"
if ! kill -0 "$pid" 2>/dev/null; then
rm -f "$f" # stale pid file, clean up
continue
fi
other_phase=$(cat "$f" 2>/dev/null)
other_group=$(group_of "$other_phase")
if [[ "$my_group" == "$other_group" ]]; then
exit 1
if kill -0 "$pid" 2>/dev/null; then
exit 1 # competing agent is alive
else
rm -f "$f" # stale pid file, clean up
fi
done

View file

@ -1,109 +0,0 @@
// compare.rs — F7 compare: for each assistant response in the current
// context, regenerate with a configured test model and emit pairs for
// side-by-side review.
use std::sync::Arc;
use crate::agent::api::ApiClient;
use crate::agent::context::{
AstNode, Role, render_branch_text, render_prior_context,
};
use crate::mind::{MindState, MindTriggered, TaskHandle};
use crate::subconscious::generate::gen_continuation;
use crate::subconscious::learn::node_timestamp_ns;
#[derive(Clone, Debug)]
pub struct CompareCandidate {
pub entry_idx: usize,
pub original_text: String,
pub alternate_text: String,
pub prior_context: String,
pub timestamp_ns: i64,
}
pub struct CompareScoring {
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
task: TaskHandle,
}
impl CompareScoring {
pub fn new(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
) -> Self {
Self { agent, shared, task: TaskHandle::new() }
}
}
impl MindTriggered for CompareScoring {
fn trigger(&self) {
self.task.trigger(run(self.agent.clone(), self.shared.clone()));
}
}
fn resolve_test_client() -> Result<ApiClient, String> {
let cfg = crate::config::app();
let name = cfg.compare.test_backend.clone();
if name.is_empty() {
return Err("compare.test_backend not set in config".to_string());
}
let r = cfg.resolve_model(&name).map_err(|e| format!("{:#}", e))?;
Ok(ApiClient::new(&r.api_base, &r.api_key, &r.model_id))
}
async fn run(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
) {
{
let mut s = shared.lock().unwrap();
s.compare_candidates.clear();
s.compare_error = None;
}
agent.state.lock().await.changed.notify_one();
let activity = crate::agent::start_activity(&agent, "compare: scoring...").await;
let test_client = match resolve_test_client() {
Ok(c) => c,
Err(e) => {
shared.lock().unwrap().compare_error = Some(e);
agent.state.lock().await.changed.notify_one();
return;
}
};
let context = agent.context.lock().await.clone();
let entries = context.conversation();
let responses: Vec<usize> = entries.iter().enumerate()
.filter(|(_, n)| matches!(n, AstNode::Branch { role: Role::Assistant, .. }))
.map(|(i, _)| i).collect();
for (i, entry_idx) in responses.iter().copied().enumerate() {
activity.update(format!("compare: {}/{}", i + 1, responses.len())).await;
let node = &entries[entry_idx];
let original_text = match node {
AstNode::Branch { children, .. } => render_branch_text(children),
_ => continue,
};
if original_text.trim().is_empty() { continue; }
let alternate_text = match
gen_continuation(&context, entry_idx, |_| false, &test_client).await
{
Ok(t) => t,
Err(e) => { dbglog!("[compare] gen failed at {}: {:#}", entry_idx, e); continue; }
};
shared.lock().unwrap().compare_candidates.push(CompareCandidate {
entry_idx,
original_text,
alternate_text,
prior_context: render_prior_context(entries, entry_idx, 2),
timestamp_ns: node_timestamp_ns(node),
});
if let Ok(st) = agent.state.try_lock() { st.changed.notify_one(); }
}
}

View file

@ -396,14 +396,13 @@ fn resolve_conversation(budget: Option<usize>) -> String {
let cfg = crate::config::get();
let max_bytes = budget.unwrap_or_else(|| cfg.surface_conversation_bytes.unwrap_or(100_000));
let app = crate::config::app();
let mut fragments: Vec<String> = Vec::new();
let mut total_bytes = 0;
let mut oldest_ts = String::new();
for (role, content, ts) in iter {
if total_bytes >= max_bytes { break; }
let name = if role == "user" { &app.user_name } else { &app.assistant_name };
let name = if role == "user" { &cfg.user_name } else { &cfg.assistant_name };
let formatted = if !ts.is_empty() {
oldest_ts = ts[..ts.floor_char_boundary(ts.len().min(19))].to_string();
format!("**{}** {}: {}", name, &oldest_ts, content)
@ -624,13 +623,11 @@ pub async fn run_agent(
let mut all_keys = keys;
let mut resolved_steps = Vec::new();
for step in &def.steps {
let template = {
let app = crate::config::app();
step.prompt
.replace("{agent_name}", &def.agent)
.replace("{user_name}", &app.user_name)
.replace("{assistant_name}", &app.assistant_name)
};
let cfg = crate::config::get();
let template = step.prompt
.replace("{agent_name}", &def.agent)
.replace("{user_name}", &cfg.user_name)
.replace("{assistant_name}", &cfg.assistant_name);
let (prompt, extra_keys) = resolve_placeholders(&template, &all_keys, count).await;
all_keys.extend(extra_keys);
resolved_steps.push(super::prompts::ResolvedStep {

View file

@ -1,46 +0,0 @@
// generate.rs — Continuation generation for scoring / comparison flows.
//
// Shared by the finetune pipeline (learn.rs) and the compare screen:
// given a context prefix and a skip predicate, generate what the model
// would say as the next assistant turn.
use crate::agent::api::{ApiClient, SamplingParams, StreamToken};
use crate::agent::context::{AstNode, ContextState};
use crate::agent::tokenizer;
/// Generate an assistant continuation from the context up to `entry_idx`,
/// with `skip` applied to identity + conversation entries during prompt
/// assembly. The model is whichever `client` points at — the default
/// runtime client for memory-ablation alternates, a test-model client
/// for F7 comparison.
pub async fn gen_continuation<F>(
context: &ContextState,
entry_idx: usize,
skip: F,
client: &ApiClient,
) -> anyhow::Result<String>
where F: FnMut(&AstNode) -> bool,
{
let (mut prompt, images, _) = context.wire_prompt(0..entry_idx, skip);
prompt.push(tokenizer::IM_START);
prompt.extend(tokenizer::encode("assistant\n"));
let sampling = SamplingParams {
temperature: 0.6,
top_p: 0.95,
top_k: 20,
};
let (mut rx, _guard) = client.stream_completion_mm(&prompt, &images, sampling, Some(-5));
let mut tokens = Vec::new();
while let Some(tok) = rx.recv().await {
match tok {
StreamToken::Token { id, .. } => tokens.push(id),
StreamToken::Done { .. } => break,
StreamToken::Error(e) => anyhow::bail!("generation error: {}", e),
}
}
Ok(tokenizer::decode(&tokens))
}

View file

@ -14,18 +14,75 @@
// with high divergence depend on memories the model
// hasn't internalized. 2 API calls.
use std::sync::Arc;
use crate::agent::api::ApiClient;
use crate::agent::context::{
Ast, AstNode, ContextState, Role, WireImage,
is_assistant, is_memory_node, memory_key, render_branch_text, render_prior_context,
};
use crate::mind::{MindState, MindTriggered, TaskHandle};
use crate::subconscious::generate::gen_continuation;
use crate::agent::context::{AstNode, Ast, NodeBody, ContextState, Role};
const SCORE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(300);
// ── Message building ────────────────────────────────────────────
/// What to filter when building the message array for scoring.
#[allow(dead_code)]
enum Filter<'a> {
None,
SkipIndex(usize),
SkipKey(&'a str),
SkipAllMemories,
}
fn is_memory(node: &AstNode) -> bool {
matches!(node, AstNode::Leaf(leaf) if matches!(leaf.body(), NodeBody::Memory { .. }))
}
fn memory_key(node: &AstNode) -> Option<&str> {
match node {
AstNode::Leaf(leaf) => match leaf.body() {
NodeBody::Memory { key, .. } => Some(key),
_ => None,
},
_ => None,
}
}
fn is_assistant(node: &AstNode) -> bool {
matches!(node, AstNode::Branch { role: Role::Assistant, .. })
}
/// Build a token ID array for a scoring call.
///
/// Includes all sections up to and including conversation entries in
/// `range`, with `filter` applied to conversation entries.
fn build_token_ids(
context: &ContextState,
range: std::ops::Range<usize>,
filter: Filter,
) -> Vec<u32> {
use crate::agent::context::Ast;
let mut ids = Vec::new();
for node in context.system() {
ids.extend(node.token_ids());
}
for node in context.identity() {
ids.extend(node.token_ids());
}
for node in context.journal() {
ids.extend(node.token_ids());
}
let entries = context.conversation();
for i in range {
let node = &entries[i];
let skip = match &filter {
Filter::None => false,
Filter::SkipIndex(idx) => i == *idx,
Filter::SkipKey(key) => memory_key(node) == Some(*key),
Filter::SkipAllMemories => is_memory(node),
};
if skip { continue; }
ids.extend(node.token_ids());
}
ids
}
// ── Score API ───────────────────────────────────────────────────
#[derive(serde::Deserialize)]
@ -48,30 +105,15 @@ async fn call_score(
http: &crate::agent::api::http::HttpClient,
client: &ApiClient,
prompt: &[u32],
images: &[WireImage],
ranges: &[(usize, usize)],
priority: Option<i32>,
) -> anyhow::Result<Vec<ScoreResult>> {
// Nothing to score — skip the round-trip.
if ranges.is_empty() {
return Ok(Vec::new());
}
let url = format!("{}/score", client.base_url());
let auth = format!("Bearer {}", client.api_key());
let mut body = serde_json::json!({
"model": client.model,
"prompt": prompt,
"score_ranges": ranges,
"logprobs": 1,
});
if !images.is_empty() {
use base64::Engine;
let b64 = base64::engine::general_purpose::STANDARD;
let uris: Vec<String> = images.iter()
.map(|img| format!("data:{};base64,{}", img.mime, b64.encode(&img.bytes)))
.collect();
body["multi_modal_data"] = serde_json::json!({ "image": uris });
}
if let Some(p) = priority {
body["priority"] = serde_json::json!(p);
}
@ -109,24 +151,16 @@ fn divergence(baseline: &[ScoreResult], without: &[ScoreResult]) -> Vec<f64> {
}
/// Score two message sets and return total divergence.
async fn score_divergence<F>(
async fn score_divergence(
http: &crate::agent::api::http::HttpClient,
client: &ApiClient,
context: &ContextState,
range: std::ops::Range<usize>,
skip: F,
filter: Filter<'_>,
priority: Option<i32>,
) -> anyhow::Result<(Vec<f64>, Vec<ScoreResult>)>
where F: FnMut(&AstNode) -> bool,
{
let (baseline_tokens, baseline_images, baseline_ranges) =
context.wire_prompt(range.clone(), |_| false);
let (without_tokens, without_images, without_ranges) =
context.wire_prompt(range, skip);
let baseline = call_score(http, client, &baseline_tokens, &baseline_images,
&baseline_ranges, priority).await?;
let without = call_score(http, client, &without_tokens, &without_images,
&without_ranges, priority).await?;
) -> anyhow::Result<(Vec<f64>, Vec<ScoreResult>)> {
let baseline = call_score(http, client, &build_token_ids(context, range.clone(), Filter::None), priority).await?;
let without = call_score(http, client, &build_token_ids(context, range, filter), priority).await?;
let divs = divergence(&baseline, &without);
Ok((divs, baseline))
}
@ -141,9 +175,7 @@ pub async fn score_memories(
// Collect memory keys and response indices under a brief lock
let (memory_keys, response_indices) = {
let ctx = agent.context.lock().await;
// Include identity nodes and conversation memories
let mut keys: Vec<String> = ctx.identity().iter()
.chain(ctx.conversation().iter())
let mut keys: Vec<String> = ctx.conversation().iter()
.filter_map(|node| memory_key(node).map(String::from))
.collect();
keys.dedup();
@ -165,22 +197,21 @@ pub async fn score_memories(
let http = http_client();
let activity = crate::agent::start_activity(agent, "scoring: baseline").await;
let (baseline_tokens, baseline_images, baseline_ranges) = {
let baseline_tokens = {
let ctx = agent.context.lock().await;
ctx.wire_prompt(0..ctx.conversation().len(), |_| false)
build_token_ids(&ctx, 0..ctx.conversation().len(), Filter::None)
};
let baseline = call_score(&http, client, &baseline_tokens, &baseline_images,
&baseline_ranges, Some(5)).await?;
let baseline = call_score(&http, client, &baseline_tokens, Some(5)).await?;
dbglog!("[scoring-full] baseline done ({} response scores)", baseline.len());
for (mem_idx, key) in memory_keys.iter().enumerate() {
activity.update(format!("scoring: {}/{}", mem_idx + 1, total)).await;
dbglog!("[scoring-full] {}/{}: {}", mem_idx + 1, total, key);
let (tokens, images, ranges) = {
let tokens = {
let ctx = agent.context.lock().await;
ctx.wire_prompt(0..ctx.conversation().len(), |n| memory_key(n) == Some(key.as_str()))
build_token_ids(&ctx, 0..ctx.conversation().len(), Filter::SkipKey(key))
};
let row = match call_score(&http, client, &tokens, &images, &ranges, Some(5)).await {
let row = match call_score(&http, client, &tokens, Some(5)).await {
Ok(without) => {
let divs = divergence(&baseline, &without);
let max_div = divs.iter().cloned().fold(0.0f64, f64::max);
@ -264,8 +295,7 @@ pub async fn score_memory(
}
let http = http_client();
let (divs, _) = score_divergence(&http, client, context, range,
|n| memory_key(n) == Some(key), Some(5)).await?;
let (divs, _) = score_divergence(&http, client, context, range, Filter::SkipKey(key), Some(5)).await?;
Ok(divs.iter().sum())
}
@ -301,10 +331,7 @@ where
{
let store = &*store_arc;
// Identity nodes always score at position 0; conversation nodes at their index
let identity_nodes = context.identity().iter().map(|n| (0, n));
let conv_nodes = context.conversation().iter().enumerate();
for (pos, node) in identity_nodes.chain(conv_nodes) {
for (i, node) in context.conversation().iter().enumerate() {
if let Some(key) = memory_key(node) {
if !seen.insert(key.to_owned()) { continue; }
let last_scored = store.get_node(key)
@ -313,7 +340,7 @@ where
.map(|n| n.last_scored)
.unwrap_or(0);
if now - last_scored >= max_age_secs {
candidates.push((pos, key.to_owned(), last_scored));
candidates.push((i, key.to_owned(), last_scored));
}
}
}
@ -357,8 +384,7 @@ where
}
activity.update(format!("scoring: {}/{} {}", scored + 1, total, key)).await;
match score_divergence(&http, client, context, range,
|n| memory_key(n) == Some(key), Some(5)).await {
match score_divergence(&http, client, context, range, Filter::SkipKey(key), Some(5)).await {
Ok((divs, _)) => {
let n_responses = divs.len();
let max_div = divs.iter().cloned().fold(0.0f64, f64::max);
@ -379,108 +405,6 @@ where
Ok(scored)
}
/// Memory scoring — two modes sharing an in-flight handle (only one
/// runs at a time): `trigger()` for incremental, `trigger_full()` for
/// the N×M debug matrix.
pub struct MemoryScoring {
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
scores_path: std::path::PathBuf,
task: TaskHandle,
}
impl MemoryScoring {
pub fn new(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
scores_path: std::path::PathBuf,
) -> Self {
Self { agent, shared, scores_path, task: TaskHandle::new() }
}
pub fn trigger_full(&self) {
self.task.trigger_if_idle(run_full(self.agent.clone(), self.shared.clone()));
}
}
impl MindTriggered for MemoryScoring {
fn trigger(&self) {
self.task.trigger_if_idle(run_incremental(
self.agent.clone(), self.shared.clone(), self.scores_path.clone(),
));
}
}
async fn run_incremental(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
scores_path: std::path::PathBuf,
) {
shared.lock().unwrap().scoring_in_flight = true;
agent.state.lock().await.changed.notify_one();
let cfg = crate::config::get();
let max_age = cfg.scoring_interval_secs;
let response_window = cfg.scoring_response_window;
let (context, client) = {
let ctx = agent.context.lock().await.clone();
(ctx, agent.client.clone())
};
let _result = score_memories_incremental(
&context, max_age as i64, response_window, &client, &agent,
|key: String, score: f64| {
let agent = agent.clone();
let path = scores_path.clone();
async move {
let scores_snapshot = {
let mut ctx = agent.context.lock().await;
let found = crate::mind::find_memory_by_key(&ctx, &key);
match found {
Some((section, i)) => {
ctx.set_score(section, i, Some(score));
dbglog!("[scoring] persisted {} → {:.3} ({:?}[{}])",
key, score, section, i);
}
None => {
dbglog!(
"[scoring] DROP {}: find_memory_by_key None (id={}, cv={})",
key, ctx.identity().len(), ctx.conversation().len()
);
}
}
let snapshot = crate::mind::collect_memory_scores(&ctx);
drop(ctx);
agent.state.lock().await.changed.notify_one();
snapshot
};
crate::mind::save_memory_scores(&scores_snapshot, &path);
}
},
).await;
shared.lock().unwrap().scoring_in_flight = false;
agent.state.lock().await.changed.notify_one();
}
async fn run_full(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
) {
shared.lock().unwrap().scoring_in_flight = true;
agent.state.lock().await.changed.notify_one();
let client = agent.client.clone();
match score_memories(&client, &agent).await {
Ok(()) => {},
Err(e) => { dbglog!("[scoring-full] FAILED: {:#}", e); }
}
shared.lock().unwrap().scoring_in_flight = false;
agent.state.lock().await.changed.notify_one();
}
// ── Fine-tuning scoring ─────────────────────────────────────────
/// Score which recent responses are candidates for fine-tuning.
@ -506,7 +430,7 @@ pub async fn score_finetune(
}
let http = http_client();
let (divs, _) = score_divergence(&http, client, context, range, is_memory_node, Some(5)).await?;
let (divs, _) = score_divergence(&http, client, context, range, Filter::SkipAllMemories, Some(5)).await?;
let mut results: Vec<(usize, f64)> = response_positions.iter()
.enumerate()
@ -515,317 +439,3 @@ pub async fn score_finetune(
results.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
Ok(results)
}
/// Enriched finetune candidate with context for review.
#[derive(Clone, Debug)]
pub struct FinetuneCandidate {
pub entry_idx: usize,
pub divergence: f64,
pub response_text: String,
/// Last couple of user/assistant messages before this response,
/// already rendered with role markers, for F6 display context.
pub prior_context: String,
/// Token IDs for context (everything before the response).
pub context_ids: Vec<u32>,
/// Token IDs for the response (what we're training on).
pub continuation_ids: Vec<u32>,
/// What the model would have said without memories (if generated).
pub alternate_text: Option<String>,
/// Timestamp in nanos — used as unique key for trained-set dedup.
pub timestamp_ns: i64,
}
/// Score and enrich finetune candidates with full context.
///
/// Candidates are delivered via `on_candidate` one-at-a-time as they become
/// ready: scoring happens once (one /score call), then for each candidate
/// that passes the threshold we optionally generate an alternate response
/// and then emit it. The activity status is updated during the alternate
/// phase so the UI doesn't look stuck.
///
/// Returns (count_above_threshold, max_divergence).
pub async fn score_finetune_candidates(
context: &ContextState,
count: usize,
client: &ApiClient,
min_divergence: f64,
generate_alternates: bool,
activity: &crate::agent::ActivityGuard,
mut on_candidate: impl FnMut(FinetuneCandidate),
) -> anyhow::Result<(usize, f64)> {
let scores = score_finetune(context, count, client).await?;
let max_divergence = scores.iter().map(|(_, d)| *d).fold(0.0f64, f64::max);
let entries = context.conversation();
let trained = load_trained();
let mut candidates: Vec<FinetuneCandidate> = Vec::new();
for (entry_idx, divergence) in scores {
if divergence < min_divergence {
continue;
}
let node = &entries[entry_idx];
// Skip if already trained on.
let timestamp_ns = node_timestamp_ns(node);
if trained.contains(&timestamp_ns) {
continue;
}
// Extract response text — content of the assistant turn.
let response_text = match node {
AstNode::Branch { children, .. } => render_branch_text(children),
_ => continue,
};
// Skip turns that produced nothing human-visible (e.g., a
// tool-only turn, or an interrupted generation). They'd show
// up as blank cards and we'd still burn alternate-gen on them.
if response_text.trim().is_empty() {
continue;
}
// Build the last couple of user/assistant exchanges for review.
let prior_context = render_prior_context(entries, entry_idx, 2);
// Build token IDs: context = everything before response, continuation = response.
let (context_ids, _, _) = context.wire_prompt(0..entry_idx, |_| false);
let continuation_ids: Vec<u32> = node.token_ids().into_iter().collect();
candidates.push(FinetuneCandidate {
entry_idx,
divergence,
response_text,
prior_context,
context_ids,
continuation_ids,
alternate_text: None,
timestamp_ns,
});
}
let total = candidates.len();
let gen_alternates = generate_alternates && total > 0;
for (i, mut candidate) in candidates.into_iter().enumerate() {
if gen_alternates {
activity.update(
format!("finetune: generating alternate {}/{}", i + 1, total)
).await;
match gen_continuation(context, candidate.entry_idx, is_memory_node, client).await {
Ok(text) => candidate.alternate_text = Some(text),
Err(e) => dbglog!("[finetune] alternate generation failed: {:#}", e),
}
}
on_candidate(candidate);
}
Ok((total, max_divergence))
}
/// Stats from a finetune scoring run. Stored on MindState for UI display.
#[derive(Clone, Debug)]
pub struct FinetuneScoringStats {
pub responses_considered: usize,
pub above_threshold: usize,
pub threshold: f64,
pub max_divergence: f64,
pub error: Option<String>,
}
/// Finetune scoring — `trigger()` aborts any in-flight run and starts
/// a fresh one, clearing the previous candidates.
pub struct FinetuneScoring {
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
task: TaskHandle,
}
impl FinetuneScoring {
pub fn new(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
) -> Self {
Self { agent, shared, task: TaskHandle::new() }
}
}
impl MindTriggered for FinetuneScoring {
fn trigger(&self) {
self.task.trigger(run_finetune(self.agent.clone(), self.shared.clone()));
}
}
async fn run_finetune(
agent: Arc<crate::agent::Agent>,
shared: Arc<std::sync::Mutex<MindState>>,
) {
let (threshold, gen_alternates) = {
let app = crate::config::app();
(app.learn.threshold, app.learn.generate_alternates)
};
// Fresh run — clear previous candidates.
shared.lock().unwrap().finetune_candidates.clear();
agent.state.lock().await.changed.notify_one();
let activity = crate::agent::start_activity(&agent, "finetune: scoring...").await;
let (context, client) = {
let ctx = agent.context.lock().await;
(ctx.clone(), agent.client.clone())
};
let entries = context.conversation();
let score_count = entries.len() / 2;
let range_start = entries.len() - score_count;
let responses_considered: usize = entries[range_start..].iter()
.filter(|n| matches!(n, AstNode::Branch { role: Role::Assistant, .. }))
.count();
activity.update(format!("finetune: scoring {} responses...", responses_considered)).await;
let stats = {
let shared = shared.clone();
let agent = agent.clone();
match score_finetune_candidates(
&context, score_count, &client, threshold,
gen_alternates, &activity,
move |c| {
shared.lock().unwrap().finetune_candidates.push(c);
if let Ok(st) = agent.state.try_lock() { st.changed.notify_one(); }
},
).await {
Ok((above_threshold, max_div)) => FinetuneScoringStats {
responses_considered,
above_threshold,
threshold,
max_divergence: max_div,
error: None,
},
Err(e) => FinetuneScoringStats {
responses_considered,
above_threshold: 0,
threshold,
max_divergence: 0.0,
error: Some(format!("{}", e)),
},
}
};
shared.lock().unwrap().finetune_last_run = Some(stats);
agent.state.lock().await.changed.notify_one();
}
// ── Finetune config and persistence ─────────────────────────────
use std::path::PathBuf;
use std::collections::HashSet;
const TRAINED_RESPONSES_FILE: &str = ".consciousness/cache/trained-responses.json";
fn trained_path() -> PathBuf {
dirs::home_dir().unwrap_or_default().join(TRAINED_RESPONSES_FILE)
}
/// Load set of trained response timestamps (nanos since epoch).
pub fn load_trained() -> HashSet<i64> {
let path = trained_path();
match std::fs::read_to_string(&path) {
Ok(content) => serde_json::from_str(&content).unwrap_or_default(),
Err(_) => HashSet::new(),
}
}
/// Mark a response as trained by its timestamp.
pub fn mark_trained(timestamp_ns: i64) {
let mut trained = load_trained();
trained.insert(timestamp_ns);
let path = trained_path();
if let Some(parent) = path.parent() {
let _ = std::fs::create_dir_all(parent);
}
if let Ok(json) = serde_json::to_string(&trained) {
let _ = std::fs::write(&path, json);
}
}
/// Get timestamp in nanoseconds from an AstNode.
/// i64-ns representation covers 1677..2262 via chrono; timestamps
/// outside that window would be bugs we'd want to surface, hence panic.
pub fn node_timestamp_ns(node: &AstNode) -> i64 {
let ts = match node {
AstNode::Leaf(leaf) => leaf.timestamp(),
AstNode::Branch { timestamp, .. } => *timestamp,
};
ts.timestamp_nanos_opt()
.expect("timestamp outside i64-ns representable range (1677..2262)")
}
// ── Training API ────────────────────────────────────────────────
/// Training sample for /train endpoint.
#[derive(serde::Serialize)]
struct TrainingSample {
context_ids: Vec<u32>,
continuation_ids: Vec<u32>,
}
/// Data needed to send a training sample.
pub struct TrainData {
pub context_ids: Vec<u32>,
pub continuation_ids: Vec<u32>,
pub timestamp_ns: i64,
}
/// Send training samples to the server.
///
/// Returns job_id on success, marks each sample as trained.
pub async fn send_to_train(
samples: Vec<TrainData>,
client: &ApiClient,
) -> anyhow::Result<String> {
if samples.is_empty() {
anyhow::bail!("no samples to train");
}
let api_samples: Vec<TrainingSample> = samples.iter()
.map(|s| TrainingSample {
context_ids: s.context_ids.clone(),
continuation_ids: s.continuation_ids.clone(),
})
.collect();
let body = serde_json::json!({
"training_data": {
"samples": api_samples,
}
});
let http = http_client();
let url = format!("{}/train", client.base_url());
let response = http.send_json("POST", &url, &[], &body).await?;
let status = response.status();
let result: serde_json::Value = response.json().await?;
if !status.is_success() {
let msg = result.get("error").and_then(|e| e.as_str()).unwrap_or("unknown error");
anyhow::bail!("train API HTTP {}: {}", status, msg);
}
// Mark all samples as trained
for s in &samples {
mark_trained(s.timestamp_ns);
}
let job_id = result.get("job_id")
.and_then(|j| j.as_str())
.unwrap_or("unknown")
.to_string();
dbglog!("[finetune] sent {} samples, job_id={}", samples.len(), job_id);
Ok(job_id)
}

View file

@ -1,9 +1,7 @@
// Agent layer: LLM-powered operations on the memory graph
pub mod compare;
pub mod daemon;
pub mod defs;
pub mod digest;
pub mod generate;
pub mod learn;
pub mod prompts;

View file

@ -372,10 +372,6 @@ impl State {
}
pub fn hours_since_last_dream() -> u64 {
// If a dream is currently in progress, no nudge needed
if home().join(".consciousness/state/dream-state").exists() {
return 0;
}
let path = home().join(".consciousness/logs/dream-log.jsonl");
let content = match fs::read_to_string(path) {
Ok(c) if !c.is_empty() => c,

View file

@ -19,51 +19,6 @@ fn channels_dir() -> PathBuf {
.join(".consciousness/channels")
}
/// Install a SIGCHLD-driven reaper for channel daemons.
///
/// SIGCHLD=SIG_IGN is not usable here: the kernel would auto-reap
/// all children, and tokio::process::Command::wait() would then fail
/// with ECHILD (breaking every tool that spawns a subprocess — bash,
/// mcp clients, etc.).
///
/// Instead, each SIGCHLD triggers a sweep of the PID files in
/// channels_dir(), calling waitpid(pid, WNOHANG) on each. That reaps
/// only our own zombie channel daemons; waitpid on any other PID
/// returns ECHILD (harmless no-op). Tokio-spawned children aren't
/// recorded in PID files, so tokio's own per-child wait paths are
/// left free to reap them.
pub fn start_zombie_reaper() -> tokio::task::JoinHandle<()> {
    use tokio::signal::unix::{signal, SignalKind};
    tokio::spawn(async move {
        let mut sigchld = match signal(SignalKind::child()) {
            Err(e) => {
                error!("failed to install SIGCHLD handler: {}", e);
                return;
            }
            Ok(stream) => stream,
        };
        // One sweep per delivered SIGCHLD; exits when the signal
        // stream closes.
        loop {
            if sigchld.recv().await.is_none() {
                break;
            }
            reap_channel_daemons();
        }
    })
}
/// Sweep the channel PID files and reap any of our zombie daemons.
///
/// waitpid(WNOHANG) on a PID that is not our child fails with ECHILD,
/// which is a harmless no-op here.
fn reap_channel_daemons() {
    let Ok(entries) = std::fs::read_dir(channels_dir()) else {
        return;
    };
    for entry in entries.flatten() {
        let path = entry.path();
        let is_pid_file = path
            .extension()
            .and_then(|ext| ext.to_str())
            .map_or(false, |ext| ext == "pid");
        if !is_pid_file {
            continue;
        }
        let contents = match std::fs::read_to_string(&path) {
            Ok(c) => c,
            Err(_) => continue,
        };
        let pid: i32 = match contents.trim().parse() {
            Ok(p) => p,
            Err(_) => continue,
        };
        let mut status = 0;
        // Non-blocking reap of our own zombie child; ECHILD otherwise.
        unsafe { libc::waitpid(pid, &mut status, libc::WNOHANG); }
    }
}
/// Location of the channel configuration file inside channels_dir().
fn config_path() -> PathBuf {
    let dir = channels_dir();
    dir.join("channels.json5")
}

View file

@ -1,400 +0,0 @@
// amygdala.rs — F8 amygdala screen: live per-token concept-readout
// projections from the vLLM server's readout.safetensors.
//
// Left panel: top-K concepts by magnitude at the currently-selected
// layer, as horizontal bars. The concept names come from the manifest
// fetched at agent startup; the values come from the per-token readout
// pushed onto agent.readout by the streaming token handler.
//
// Bottom: scrolling history of the last few tokens' top concept.
//
// Keys:
// 1..9 select layer index (1 = first layer in the manifest)
// t toggle between "current" (last token) and "mean over recent"
use ratatui::{
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, Gauge, Paragraph, Wrap},
Frame,
};
use ratatui::crossterm::event::{Event, KeyCode};
use super::{App, ScreenView};
use crate::agent::api::ReadoutManifest;
use crate::agent::readout::ReadoutEntry;
/// Number of concept bars shown at once in the left panel.
const TOP_K: usize = 20;
/// Hysteresis band around TOP_K. A concept currently in the display
/// is kept as long as its |z-score| rank stays in the top
/// ``TOP_K + HYSTERESIS``; only falls out when it drops below that.
/// Prevents the ticker-tape flicker that pure top-K sorting produces.
const HYSTERESIS: usize = 20;
/// State for the F8 amygdala screen (per-token concept readout).
pub(crate) struct AmygdalaScreen {
    // Index into the manifest's layer list; bounded at render time.
    selected_layer: usize,
    // Whether values come from the last token or a mean over recent.
    mode: DisplayMode,
    /// Concept indices currently pinned in display order. Values at
    /// these indices change every frame; the set only rotates when a
    /// pinned concept drops out of the hysteresis band.
    display_indices: Vec<usize>,
    /// Whether to show z-scored values (default) or raw dot products.
    normalize: bool,
}
/// Which slice of the readout ring buffer feeds the bar values.
#[derive(Clone, Copy, PartialEq)]
enum DisplayMode {
    /// Values from the single most recent token.
    Current,
    /// Mean over all tokens currently in the ring buffer.
    MeanRecent,
}
impl AmygdalaScreen {
    /// Create the screen with default settings: first manifest layer,
    /// mean-over-recent mode, z-score scaling, no pinned concepts.
    pub fn new() -> Self {
        Self {
            // Default to layer 62 — clean cross-cluster discrimination
            // with good within-cluster cohesion. With the v2 deep
            // manifest (layers 62, 63), index 0 = layer 62 and
            // index 1 = layer 63 (sharper but noisier on some
            // dimensions). Bounded down to actual layer count at
            // render time.
            selected_layer: 0,
            mode: DisplayMode::MeanRecent,
            display_indices: Vec::new(),
            normalize: true,
        }
    }
}
impl ScreenView for AmygdalaScreen {
    fn label(&self) -> &'static str { "amygdala" }

    /// Draw the screen and process key events.
    ///
    /// Keys: 1..9 select a manifest layer, `t` toggles
    /// current/mean-recent mode, `z` toggles z-score vs raw scaling.
    fn tick(&mut self, frame: &mut Frame, area: Rect,
            events: &[Event], app: &mut App) {
        for event in events {
            if let Event::Key(key) = event {
                match key.code {
                    // Digits 1..9 pick a layer (1-based in the UI).
                    KeyCode::Char(c) if c.is_ascii_digit() && c != '0' => {
                        let idx = (c as u8 - b'1') as usize;
                        self.selected_layer = idx;
                    }
                    KeyCode::Char('t') => {
                        self.mode = match self.mode {
                            DisplayMode::Current => DisplayMode::MeanRecent,
                            DisplayMode::MeanRecent => DisplayMode::Current,
                        };
                        // Re-pin on mode change; the relative
                        // magnitudes between current-token and
                        // mean-recent differ substantially.
                        self.display_indices.clear();
                    }
                    KeyCode::Char('z') => {
                        self.normalize = !self.normalize;
                        self.display_indices.clear();
                    }
                    _ => {}
                }
            }
        }
        // Snapshot the shared buffer with a short lock.
        let snapshot = match app.agent.readout.lock() {
            Ok(buf) => {
                if !buf.is_enabled() {
                    render_disabled(frame, area);
                    return;
                }
                // NOTE(review): assumes is_enabled() implies the
                // manifest is Some — confirm, otherwise this panics.
                let manifest = buf.manifest.clone().unwrap();
                let entries: Vec<ReadoutEntry> =
                    buf.recent.iter().cloned().collect();
                (manifest, entries)
            }
            // A poisoned lock is rendered the same as "disabled".
            Err(_) => {
                render_disabled(frame, area);
                return;
            }
        };
        let (manifest, entries) = snapshot;
        // Bound the selected layer to what the manifest actually has.
        let n_layers = manifest.layers.len();
        if self.selected_layer >= n_layers {
            self.selected_layer = 0;
        }
        // Compute the raw values for the selected layer: either the
        // latest token's row, or the mean across recent tokens. Raw
        // means un-normalized dot products — their absolute scale is
        // dominated by residual-stream norm, not concept alignment.
        let raw: Option<Vec<f32>> = match self.mode {
            DisplayMode::Current => entries
                .last()
                .and_then(|e| e.readout.get(self.selected_layer).cloned()),
            DisplayMode::MeanRecent => mean_layer(&entries, self.selected_layer),
        };
        // Optional z-score normalization: remove the per-layer mean,
        // scale by std. Result is "σ above/below the concept-vector
        // average at this layer" — the loud-residual-stream scaling
        // factor cancels out, values become comparable across frames.
        let display_values = raw.as_ref().map(|v| {
            if self.normalize { z_score(v) } else { v.clone() }
        });
        // Update the pinned display set with hysteresis: a concept
        // stays pinned while it remains in the top (TOP_K + HYSTERESIS)
        // by |value|; falls out only when it drops below that band.
        // Keeps rows stable while values update in place.
        if let Some(v) = display_values.as_ref() {
            self.refresh_display_indices(v);
        }
        let layout = Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Length(3), // header
                Constraint::Min(10), // bars
                Constraint::Length(6), // recent tokens
            ])
            .split(area);
        render_header(frame, layout[0], &manifest, self.selected_layer,
                      self.mode, entries.len(), self.normalize);
        match display_values {
            Some(v) => render_bars(
                frame, layout[1], &manifest.concepts, &v,
                &self.display_indices, self.normalize,
            ),
            None => render_empty_bars(frame, layout[1]),
        }
        render_recent(frame, layout[2], &entries, self.selected_layer,
                      &manifest.concepts);
    }
}
impl AmygdalaScreen {
    /// Update the pinned display set with hysteresis. Concepts keep
    /// their row while they remain in the top `TOP_K + HYSTERESIS` by
    /// absolute value; evicted only when they leave that band. New
    /// concepts are appended from the strict top-K until TOP_K rows
    /// are filled, preserving the order of survivors.
    fn refresh_display_indices(&mut self, values: &[f32]) {
        if values.is_empty() {
            return;
        }
        let n = values.len();
        // All concept indices, ordered by |value| descending (stable:
        // ties keep ascending index order).
        let mut order: Vec<usize> = (0..n).collect();
        order.sort_by(|&a, &b| {
            values[b]
                .abs()
                .partial_cmp(&values[a].abs())
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        let band = (TOP_K + HYSTERESIS).min(n);
        let in_band: std::collections::HashSet<usize> =
            order[..band].iter().copied().collect();
        // Evict anything that fell out of the hysteresis band.
        self.display_indices.retain(|idx| in_band.contains(idx));
        // Top up from the strict top-K by |value|.
        for &idx in &order[..TOP_K.min(n)] {
            if self.display_indices.len() >= TOP_K {
                break;
            }
            if !self.display_indices.contains(&idx) {
                self.display_indices.push(idx);
            }
        }
    }
}
/// Placeholder panel shown when the server exposes no readout
/// manifest (or the readout lock is poisoned).
fn render_disabled(frame: &mut Frame, area: Rect) {
    let dim = Style::default().fg(Color::DarkGray);
    let env_key = Style::default().fg(Color::Yellow);
    let spans = vec![
        Span::raw("readout disabled — server did not return a manifest. "),
        Span::styled("Start vLLM with ", dim),
        Span::styled("VLLM_READOUT_MANIFEST", env_key),
        Span::styled(" + ", dim),
        Span::styled("VLLM_READOUT_VECTORS", env_key),
        Span::styled(".", dim),
    ];
    let para = Paragraph::new(Line::from(spans))
        .wrap(Wrap { trim: true })
        .block(Block::default().borders(Borders::ALL).title("amygdala"));
    frame.render_widget(para, area);
}
/// Render the status line: layer selector, display mode, scale mode,
/// ring-buffer fill, and the key legend.
fn render_header(frame: &mut Frame, area: Rect, manifest: &ReadoutManifest,
                 selected: usize, mode: DisplayMode, n_tokens: usize,
                 normalize: bool) {
    let mode_str = match mode {
        DisplayMode::Current => "current",
        DisplayMode::MeanRecent => "mean(recent)",
    };
    let scale_str = if normalize { "z-score" } else { "raw" };
    // Actual model-layer index behind the 1-based selector position.
    let layer = manifest.layers.get(selected).copied().unwrap_or(0);
    let spans = vec![
        Span::styled("layer ", Style::default().fg(Color::DarkGray)),
        Span::styled(
            format!("{}/{} ", selected + 1, manifest.layers.len()),
            Style::default().add_modifier(Modifier::BOLD),
        ),
        Span::styled("(index ", Style::default().fg(Color::DarkGray)),
        Span::styled(format!("{}", layer), Style::default().fg(Color::Cyan)),
        Span::styled(") ", Style::default().fg(Color::DarkGray)),
        Span::styled("mode ", Style::default().fg(Color::DarkGray)),
        Span::styled(mode_str, Style::default().fg(Color::Yellow)),
        Span::styled(" scale ", Style::default().fg(Color::DarkGray)),
        Span::styled(scale_str, Style::default().fg(Color::Yellow)),
        Span::styled(" ", Style::default()),
        Span::styled(
            format!("{} toks in ring", n_tokens),
            Style::default().fg(Color::DarkGray),
        ),
        Span::raw(" "),
        // Key legend; only up to 9 layers are reachable via digits.
        Span::styled(
            format!("[1-{}] layer [t] mode [z] z-score/raw",
                    manifest.layers.len().min(9)),
            Style::default().fg(Color::DarkGray),
        ),
    ];
    let para = Paragraph::new(Line::from(spans))
        .block(Block::default().borders(Borders::ALL).title("amygdala"));
    frame.render_widget(para, area);
}
/// Render the pinned concepts as one-row gauges in `display_indices`
/// order. Bar length is |value| / scale_ref; green for positive
/// values, red for negative.
fn render_bars(frame: &mut Frame, area: Rect,
               concepts: &[String], values: &[f32],
               display_indices: &[usize], normalize: bool) {
    let inner = Block::default().borders(Borders::ALL)
        .title("top concepts");
    let inner_area = inner.inner(area);
    frame.render_widget(inner, area);
    if inner_area.height == 0 || display_indices.is_empty() {
        return;
    }
    // Bar-scale normalization. For z-score mode, pin the bar to a
    // fixed reference (|z| = 3 = full bar) so the visual magnitude
    // has a meaningful interpretation ("3σ from baseline"). For raw
    // mode, fall back to the old behavior (scale to the loudest
    // concept on-screen).
    let scale_ref: f32 = if normalize {
        3.0
    } else {
        display_indices.iter()
            .filter_map(|&i| values.get(i))
            .map(|v| v.abs())
            .fold(0.0_f32, f32::max)
            .max(1e-6)
    };
    // One terminal row per concept, capped by available height.
    let rows = (inner_area.height as usize).min(display_indices.len());
    let row_constraints: Vec<Constraint> =
        std::iter::repeat(Constraint::Length(1)).take(rows).collect();
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints(row_constraints)
        .split(inner_area);
    for (row, &c_idx) in display_indices.iter().take(rows).enumerate() {
        let v = values.get(c_idx).copied().unwrap_or(0.0);
        // Fall back to "c<idx>" when the manifest has no name.
        let label = concepts.get(c_idx).cloned()
            .unwrap_or_else(|| format!("c{}", c_idx));
        let ratio = (v.abs() / scale_ref).clamp(0.0, 1.0);
        let color = if v >= 0.0 { Color::Green } else { Color::Red };
        let display_num = if normalize {
            format!("{:+.2}σ", v)
        } else {
            format!("{:+.3}", v)
        };
        let gauge = Gauge::default()
            .ratio(ratio as f64)
            .gauge_style(Style::default().fg(color).bg(Color::Reset))
            .label(format!("{:<26} {}", truncate_name(&label, 26), display_num));
        frame.render_widget(gauge, chunks[row]);
    }
}
/// Render the bars panel before any token data has arrived.
fn render_empty_bars(frame: &mut Frame, area: Rect) {
    let placeholder = Span::styled(
        "waiting for tokens…",
        Style::default().fg(Color::DarkGray),
    );
    let block = Block::default().borders(Borders::ALL).title("top concepts");
    let para = Paragraph::new(Line::from(placeholder)).block(block);
    frame.render_widget(para, area);
}
/// Render the last few tokens (newest first) with each token's single
/// strongest concept at `layer`.
fn render_recent(frame: &mut Frame, area: Rect, entries: &[ReadoutEntry],
                 layer: usize, concepts: &[String]) {
    let mut lines: Vec<Line> = Vec::new();
    for entry in entries.iter().rev().take(4) {
        let row = match entry.readout.get(layer) {
            Some(r) => r,
            None => continue,
        };
        // top concept at this layer for this token
        // (an all-zero row falls back to concept 0 with value 0.0)
        let (best_idx, best_val) = row.iter().enumerate()
            .fold((0, 0.0_f32), |acc, (i, v)| {
                if v.abs() > acc.1.abs() { (i, *v) } else { acc }
            });
        let name = concepts.get(best_idx).cloned()
            .unwrap_or_else(|| format!("c{}", best_idx));
        let tok_str = format!("t{:>5}", entry.token_id);
        lines.push(Line::from(vec![
            Span::styled(tok_str, Style::default().fg(Color::DarkGray)),
            Span::raw(" "),
            Span::styled(
                format!("{:<24}", truncate_name(&name, 24)),
                Style::default().fg(
                    if best_val >= 0.0 { Color::Green } else { Color::Red },
                ),
            ),
            Span::styled(
                format!(" {:+.3}", best_val),
                Style::default().add_modifier(Modifier::BOLD),
            ),
        ]));
    }
    let para = Paragraph::new(lines)
        .block(Block::default().borders(Borders::ALL).title("recent tokens — top concept"));
    frame.render_widget(para, area);
}
/// Z-score normalize: `(v - mean) / std` across the concept axis.
/// The residual-stream magnitude factors out, so results are
/// comparable across frames and layers, and "this is ≥2σ elevated"
/// has a concrete meaning regardless of scale.
fn z_score(values: &[f32]) -> Vec<f32> {
    if values.is_empty() {
        return Vec::new();
    }
    let n = values.len() as f32;
    let mean = values.iter().sum::<f32>() / n;
    let variance = values
        .iter()
        .map(|v| {
            let d = v - mean;
            d * d
        })
        .sum::<f32>()
        / n;
    // Floor the std to avoid dividing by ~0 on constant input.
    let std_dev = variance.sqrt().max(1e-6);
    values.iter().map(|v| (v - mean) / std_dev).collect()
}
/// Mean readout row at `layer` across all buffered tokens; `None`
/// when no token has a row for that layer.
fn mean_layer(entries: &[ReadoutEntry], layer: usize) -> Option<Vec<f32>> {
    let rows: Vec<&Vec<f32>> = entries
        .iter()
        .filter_map(|e| e.readout.get(layer))
        .collect();
    let first = rows.first()?;
    // Accumulator sized to the first row; rows are expected uniform.
    let mut acc = vec![0.0_f32; first.len()];
    for row in &rows {
        for (slot, value) in row.iter().enumerate() {
            acc[slot] += *value;
        }
    }
    let count = rows.len() as f32;
    Some(acc.into_iter().map(|v| v / count).collect())
}
/// Truncate `s` to fit in `max` bytes for fixed-width display.
///
/// Strings that already fit are returned unchanged; longer strings
/// are cut to at most `max - 1` bytes. The cut point backs up to the
/// nearest UTF-8 character boundary, because `&s[..i]` panics when
/// `i` falls inside a multi-byte character — the original
/// `&s[..max.saturating_sub(1)]` crashed on non-ASCII concept names.
fn truncate_name(s: &str, max: usize) -> String {
    if s.len() <= max {
        return s.to_string();
    }
    let mut end = max.saturating_sub(1);
    // Back up to a char boundary; index 0 always qualifies, so this
    // terminates.
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    s[..end].to_string()
}

View file

@ -112,7 +112,13 @@ pub async fn cmd_switch_model(
let _new_client = crate::agent::api::ApiClient::new(
&resolved.api_base, &resolved.api_key, &resolved.model_id,
);
agent.state.lock().await.notify(format!("switched to {}", resolved.model_id));
let prompt_changed = resolved.prompt_file != agent.prompt_file;
if prompt_changed {
agent.compact().await;
agent.state.lock().await.notify(format!("switched to {} (recompacted)", resolved.model_id));
} else {
agent.state.lock().await.notify(format!("switched to {}", resolved.model_id));
}
}
fn notify_help(agent: &std::sync::Arc<crate::agent::Agent>) {
@ -167,7 +173,6 @@ enum PaneTarget {
ConversationAssistant,
Tools,
ToolResult,
Autonomous,
}
const MAX_PANE_LINES: usize = 10_000;
@ -473,11 +478,8 @@ impl InteractScreen {
AstNode::Leaf(leaf) => {
let text = leaf.body().text().to_string();
match leaf.body() {
NodeBody::Memory { .. } | NodeBody::Log(_) | NodeBody::Dmn(_) => vec![],
NodeBody::Thinking(_) => {
if text.is_empty() { vec![] }
else { vec![(PaneTarget::Autonomous, text, Marker::None)] }
}
NodeBody::Memory { .. } | NodeBody::Thinking(_)
| NodeBody::Log(_) | NodeBody::Dmn(_) => vec![],
NodeBody::Content(_) => {
if text.is_empty() || text.starts_with("<system-reminder>") { vec![] }
else { vec![(PaneTarget::Conversation, text, Marker::User)] }
@ -490,11 +492,6 @@ impl InteractScreen {
if t.is_empty() { vec![] }
else { vec![(PaneTarget::ToolResult, text, Marker::None)] }
}
NodeBody::Image { orig_height, orig_width, .. } => {
vec![(PaneTarget::Conversation,
format!("[image {}x{}]", orig_width, orig_height),
Marker::None)]
}
}
}
AstNode::Branch { role, children, .. } => {
@ -551,12 +548,6 @@ impl InteractScreen {
self.tools.push_line(format!(" {}", line), Color::DarkGray);
}
}
PaneTarget::Autonomous => {
self.autonomous.current_color = Color::Gray;
self.autonomous.append_text(&text);
self.autonomous.pending_marker = marker;
self.autonomous.flush_pending();
}
}
}
}
@ -568,8 +559,6 @@ impl InteractScreen {
=> self.conversation.pop_line(),
PaneTarget::Tools | PaneTarget::ToolResult
=> self.tools.pop_line(),
PaneTarget::Autonomous
=> self.autonomous.pop_line(),
}
}
}

View file

@ -1,111 +0,0 @@
// compare.rs — F7 compare screen: side-by-side test-model regen of
// every assistant response in the current context.
use ratatui::{
layout::Rect,
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame,
};
use ratatui::crossterm::event::{Event, KeyCode};
use super::{App, ScreenView, truncate, widgets};
pub use crate::subconscious::compare::CompareCandidate;
/// State for the F7 compare screen.
pub(crate) struct CompareScreen {
    // Selection within the candidate list.
    list_state: ListState,
    // Channel for asking the mind loop to run a comparison.
    mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
}
impl CompareScreen {
    /// Create the screen with an empty selection.
    pub fn new(
        mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
    ) -> Self {
        Self { list_state: ListState::default(), mind_tx }
    }
}
impl ScreenView for CompareScreen {
    fn label(&self) -> &'static str { "compare" }

    /// Draw the screen and process key events.
    ///
    /// Keys: j/k/arrows navigate; c/Enter requests a comparison run
    /// via the mind channel.
    fn tick(&mut self, frame: &mut Frame, area: Rect,
            events: &[Event], app: &mut App) {
        widgets::handle_list_nav(events, &mut self.list_state,
            app.compare_candidates.len(), |code| match code {
            KeyCode::Char('c') | KeyCode::Enter => {
                // Fire-and-forget; results show up later in
                // app.compare_candidates.
                let _ = self.mind_tx.send(crate::mind::MindCommand::Compare);
            }
            _ => {}
        });
        let (settings_area, content_area, help_area) =
            widgets::candidate_frame(frame, area, "compare");
        // Settings row: show the configured test backend (red when
        // unset).
        let test_backend = crate::config::app().compare.test_backend.clone();
        let (label, color) = if test_backend.is_empty() {
            ("(unset — set compare.test_backend)".to_string(), Color::Red)
        } else {
            (test_backend, Color::Yellow)
        };
        frame.render_widget(Paragraph::new(Line::from(vec![
            Span::raw(" test model: "),
            Span::styled(label, Style::default().fg(color)),
        ])), settings_area);
        let candidates = &app.compare_candidates;
        if candidates.is_empty() {
            // No results yet: show instructions plus any error from
            // the last run.
            let err = app.mind_state.as_ref().and_then(|ms| ms.compare_error.as_deref());
            let mut lines = vec![Line::from(""),
                Line::styled(" Press c/Enter to compare against the configured test model.",
                    Style::default().fg(Color::DarkGray))];
            if let Some(e) = err {
                lines.push(Line::from(""));
                lines.push(Line::from(vec![
                    Span::raw(" "),
                    Span::styled(format!("error: {}", e), Style::default().fg(Color::Red)),
                ]));
            }
            frame.render_widget(Paragraph::new(lines), content_area);
        } else {
            let (list_area, detail_area) = widgets::list_detail_split(content_area);
            let items: Vec<ListItem> = candidates.iter().map(|c| ListItem::new(Line::from(vec![
                Span::styled(format!("#{:<3} ", c.entry_idx), Style::default().fg(Color::DarkGray)),
                Span::raw(truncate(&c.original_text, 30)),
            ]))).collect();
            frame.render_stateful_widget(
                List::new(items)
                    .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
                    .highlight_style(Style::default().add_modifier(Modifier::REVERSED)),
                list_area, &mut self.list_state,
            );
            // Detail pane: prior context (if any), the original
            // response, then the test-model regeneration.
            if let Some(c) = self.list_state.selected().and_then(|i| candidates.get(i)) {
                let mut text = String::new();
                if !c.prior_context.is_empty() {
                    text.push_str(&c.prior_context);
                    text.push_str("\n\n─── original ───\n\n");
                }
                text.push_str(&c.original_text);
                text.push_str("\n\n─── test model ───\n\n");
                text.push_str(&c.alternate_text);
                frame.render_widget(
                    Paragraph::new(text)
                        .block(Block::default().borders(Borders::TOP)
                            .title(format!(" entry {} ", c.entry_idx)))
                        .wrap(Wrap { trim: false }),
                    detail_area,
                );
            }
        }
        frame.render_widget(Paragraph::new(Line::from(vec![
            Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
            Span::raw("=nav "),
            Span::styled("c/Enter", Style::default().fg(Color::Green)),
            Span::raw("=run "),
        ])), help_area);
    }
}

View file

@ -38,13 +38,16 @@ impl ConsciousScreen {
for node in ctx.conversation() {
if let AstNode::Leaf(leaf) = node {
if let NodeBody::Memory { key, score, text } = leaf.body() {
if score.is_some() { scored += 1; } else { unscored += 1; }
let status = match score {
Some(s) => { scored += 1; format!("{:.2}", s) }
None => { unscored += 1; String::new() }
};
mem_children.push(SectionView {
name: format!("mem: {}", key),
name: key.clone(),
tokens: node.tokens(),
content: text.clone(),
children: Vec::new(),
status: score.map(|s| format!("{:.2}", s)).unwrap_or_default(),
status,
});
}
}
@ -126,7 +129,14 @@ impl ScreenView for ConsciousScreen {
let section_style = Style::default().fg(Color::Yellow);
lines.push(Line::styled("── Model ──", section_style));
lines.push(Line::raw(format!(" Current: {}", app.status.model)));
let model_display = app.context_info.as_ref()
.map_or_else(|| app.status.model.clone(), |i| i.model.clone());
lines.push(Line::raw(format!(" Current: {}", model_display)));
if let Some(ref info) = app.context_info {
lines.push(Line::raw(format!(" Backend: {}", info.backend)));
lines.push(Line::raw(format!(" Prompt: {}", info.prompt_file)));
lines.push(Line::raw(format!(" Available: {}", info.available_models.join(", "))));
}
lines.push(Line::raw(""));
lines.push(Line::styled("── Context State ──", section_style));
@ -146,6 +156,8 @@ impl ScreenView for ConsciousScreen {
lines.push(Line::raw(format!(" {:53} {:>6} tokens", "────────", "──────")));
lines.push(Line::raw(format!(" {:53} {:>6} tokens", "Total", total)));
} else if let Some(ref info) = app.context_info {
lines.push(Line::raw(format!(" Context message: {:>6} chars", info.context_message_chars)));
}
lines.push(Line::raw(""));

View file

@ -1,284 +0,0 @@
// learn.rs — F6: fine-tuning review screen
//
// Shows responses identified as training candidates (high divergence
// when memories stripped). Queue for review before sending to /finetune.
use ratatui::{
layout::{Constraint, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame,
};
use ratatui::crossterm::event::{Event, KeyCode};
use super::{App, ScreenView, truncate, widgets};
/// A candidate response identified for fine-tuning.
///
/// Converted from the subconscious-layer candidate type (see the
/// `From` impl below) and reviewed on this screen before being sent
/// to the training endpoint.
#[derive(Clone, Debug)]
pub struct FinetuneCandidate {
    /// Index in conversation entries.
    pub entry_idx: usize,
    /// Divergence score (higher = more dependent on memories).
    pub divergence: f64,
    /// The assistant response text.
    pub response_text: String,
    /// Prior user/assistant messages for review context.
    pub prior_context: String,
    /// Status: pending, approved, rejected, sent.
    pub status: CandidateStatus,
    /// Token IDs for context.
    pub context_ids: Vec<u32>,
    /// Token IDs for continuation (what we're training on).
    pub continuation_ids: Vec<u32>,
    /// What the model would have said without memories (if generated).
    pub alternate_text: Option<String>,
    /// Timestamp in nanos — used as unique key for trained-set dedup.
    pub timestamp_ns: i64,
}
/// Review status of a fine-tuning candidate.
#[derive(Clone, Debug, PartialEq)]
pub enum CandidateStatus {
    /// Awaiting review.
    Pending,
    /// Approved by the reviewer; eligible for the next send.
    Approved,
    /// Rejected by the reviewer; never sent.
    Rejected,
    /// Submitted to the training endpoint.
    Sent,
}
impl From<crate::subconscious::learn::FinetuneCandidate> for FinetuneCandidate {
    /// Lift a subconscious-layer candidate into the UI type; review
    /// status always starts as `Pending`.
    fn from(c: crate::subconscious::learn::FinetuneCandidate) -> Self {
        FinetuneCandidate {
            entry_idx: c.entry_idx,
            divergence: c.divergence,
            response_text: c.response_text,
            prior_context: c.prior_context,
            status: CandidateStatus::Pending,
            context_ids: c.context_ids,
            continuation_ids: c.continuation_ids,
            alternate_text: c.alternate_text,
            timestamp_ns: c.timestamp_ns,
        }
    }
}
/// State for the F6 learn (fine-tuning review) screen.
pub(crate) struct LearnScreen {
    // Selection within the candidate list.
    list_state: ListState,
    // Channel for sending threshold/generation toggles to the mind loop.
    mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
}
impl LearnScreen {
    /// Create the screen with an empty selection.
    pub fn new(
        mind_tx: tokio::sync::mpsc::UnboundedSender<crate::mind::MindCommand>,
    ) -> Self {
        Self {
            list_state: ListState::default(),
            mind_tx,
        }
    }

    /// Index of the currently selected candidate, if any.
    fn selected_idx(&self) -> Option<usize> {
        self.list_state.selected()
    }
}
impl ScreenView for LearnScreen {
    fn label(&self) -> &'static str { "learn" }

    /// Draw the screen and process key events.
    ///
    /// Keys: j/k/arrows navigate; a/r approve/reject the selection;
    /// g toggles alternate generation; s sends approved candidates;
    /// +/- scales the divergence threshold by 10x.
    fn tick(&mut self, frame: &mut Frame, area: Rect,
            events: &[Event], app: &mut App) {
        // Snapshot the selection before nav mutates list_state, so
        // a/r act on the selection as it was when the key arrived.
        let selected_idx = self.list_state.selected();
        widgets::handle_list_nav(events, &mut self.list_state,
            app.finetune_candidates.len(), |code| match code {
            KeyCode::Char('a') => {
                if let Some(idx) = selected_idx {
                    app.finetune_action(idx, CandidateStatus::Approved);
                }
            }
            KeyCode::Char('r') => {
                if let Some(idx) = selected_idx {
                    app.finetune_action(idx, CandidateStatus::Rejected);
                }
            }
            KeyCode::Char('g') => {
                // Toggle whether alternates (no-memory regens) are
                // generated during scoring.
                let current = crate::config::app().learn.generate_alternates;
                let _ = self.mind_tx.send(
                    crate::mind::MindCommand::SetLearnGenerateAlternates(!current));
            }
            KeyCode::Char('s') => { app.finetune_send_approved(); }
            KeyCode::Char('+') | KeyCode::Char('=') => {
                let new = crate::config::app().learn.threshold * 10.0;
                let _ = self.mind_tx.send(crate::mind::MindCommand::SetLearnThreshold(new));
            }
            KeyCode::Char('-') => {
                let new = crate::config::app().learn.threshold / 10.0;
                let _ = self.mind_tx.send(crate::mind::MindCommand::SetLearnThreshold(new));
            }
            _ => {}
        });
        let (settings_area, content_area, help_area) =
            widgets::candidate_frame(frame, area, "learn");
        let (threshold, gen_on) = {
            let app_cfg = crate::config::app();
            (app_cfg.learn.threshold, app_cfg.learn.generate_alternates)
        };
        let settings = Line::from(vec![
            Span::raw(" thresh: "),
            Span::styled(format!("{:e}", threshold), Style::default().fg(Color::Yellow)),
            Span::raw(" gen: "),
            Span::styled(
                if gen_on { "[on]" } else { "[off]" },
                Style::default().fg(if gen_on { Color::Green } else { Color::DarkGray }),
            ),
        ]);
        frame.render_widget(Paragraph::new(settings), settings_area);
        let candidates = &app.finetune_candidates;
        if candidates.is_empty() {
            render_empty(frame, content_area, app);
        } else {
            let (list_area, detail_area) = widgets::list_detail_split(content_area);
            // Render candidate list
            let items: Vec<ListItem> = candidates.iter().map(|c| {
                let status_char = match c.status {
                    CandidateStatus::Pending => ' ',
                    CandidateStatus::Approved => '+',
                    CandidateStatus::Rejected => '-',
                    CandidateStatus::Sent => '*',
                };
                let style = match c.status {
                    CandidateStatus::Pending => Style::default(),
                    CandidateStatus::Approved => Style::default().fg(Color::Green),
                    CandidateStatus::Rejected => Style::default().fg(Color::DarkGray),
                    CandidateStatus::Sent => Style::default().fg(Color::Cyan),
                };
                ListItem::new(Line::from(vec![
                    Span::styled(format!("[{}] ", status_char), style),
                    Span::styled(format!("{:.2} ", c.divergence), Style::default().fg(Color::Yellow)),
                    Span::raw(truncate(&c.response_text, 30)),
                ]))
            }).collect();
            let list = List::new(items)
                .block(Block::default().borders(Borders::RIGHT).title(" candidates "))
                .highlight_style(Style::default().add_modifier(Modifier::REVERSED));
            frame.render_stateful_widget(list, list_area, &mut self.list_state);
            // Render detail for selected candidate
            if let Some(idx) = self.selected_idx() {
                if let Some(candidate) = candidates.get(idx) {
                    render_detail(frame, candidate, detail_area);
                }
            }
        }
        frame.render_widget(Paragraph::new(Line::from(vec![
            Span::styled(" j/k/\u{2191}\u{2193}", Style::default().fg(Color::Cyan)),
            Span::raw("=nav "),
            Span::styled("a", Style::default().fg(Color::Green)),
            Span::raw("=approve "),
            Span::styled("r", Style::default().fg(Color::Red)),
            Span::raw("=reject "),
            Span::styled("g", Style::default().fg(Color::Yellow)),
            Span::raw("=gen "),
            Span::styled("s", Style::default().fg(Color::Magenta)),
            Span::raw("=send "),
            Span::styled("+/-", Style::default().fg(Color::Cyan)),
            Span::raw("=thresh "),
        ])), help_area);
    }
}
/// Render the empty-state panel: stats from the last scoring run (or
/// a "no run yet" note) plus a hint about when scoring happens.
fn render_empty(frame: &mut Frame, inner: Rect, app: &App) {
    let mut lines = Vec::new();
    lines.push(Line::from(""));
    match app.mind_state.as_ref().and_then(|ms| ms.finetune_last_run.as_ref()) {
        Some(stats) => {
            lines.push(Line::from(vec![
                Span::raw(" Last run: "),
                Span::styled(
                    format!("{}", stats.responses_considered),
                    Style::default().fg(Color::Cyan),
                ),
                Span::raw(" responses considered, "),
                Span::styled(
                    format!("{}", stats.above_threshold),
                    Style::default().fg(if stats.above_threshold > 0 { Color::Green } else { Color::DarkGray }),
                ),
                Span::raw(" above threshold, max divergence: "),
                Span::styled(
                    format!("{:.4}", stats.max_divergence),
                    Style::default().fg(Color::Yellow),
                ),
            ]));
            if let Some(err) = &stats.error {
                lines.push(Line::from(vec![
                    Span::raw(" "),
                    Span::styled(
                        format!("Error: {}", err),
                        Style::default().fg(Color::Red),
                    ),
                ]));
            }
        }
        None => {
            lines.push(Line::styled(
                " No scoring run yet.",
                Style::default().fg(Color::DarkGray),
            ));
        }
    }
    lines.push(Line::from(""));
    lines.push(Line::styled(
        " Scoring runs at startup and after each turn.",
        Style::default().fg(Color::DarkGray),
    ));
    frame.render_widget(Paragraph::new(lines), inner);
}
/// Render the detail pane for one candidate: a small header with the
/// divergence score, then prior context, the scored response, and the
/// no-memories alternate when one was generated.
fn render_detail(frame: &mut Frame, c: &FinetuneCandidate, area: Rect) {
    let [header_area, content_area] = Layout::vertical([
        Constraint::Length(3),
        Constraint::Min(1),
    ]).areas(area);
    // Header: divergence, status
    let alt_status = if c.alternate_text.is_some() { "yes" } else { "no" };
    let header = Paragraph::new(vec![
        Line::from(vec![
            Span::raw(" divergence: "),
            Span::styled(format!("{:.3}", c.divergence), Style::default().fg(Color::Yellow)),
            Span::raw(format!(" entry: {} alt: {}", c.entry_idx, alt_status)),
        ]),
    ]);
    frame.render_widget(header, header_area);
    // Content: prior context, the scored response, and alternate
    // (if available).
    let content_block = Block::default()
        .borders(Borders::TOP)
        .title(" context & response ");
    let mut text = String::new();
    if !c.prior_context.is_empty() {
        text.push_str(&c.prior_context);
        text.push_str("\n\n─── response ───\n\n");
    }
    text.push_str(&c.response_text);
    if let Some(alt) = &c.alternate_text {
        text.push_str("\n\n─── without memories ───\n\n");
        text.push_str(alt);
    }
    let content = Paragraph::new(text)
        .block(content_block)
        .wrap(Wrap { trim: false });
    frame.render_widget(content, content_area);
}

View file

@ -3,16 +3,13 @@
// TUI, UI channel, parsing. The cognitive layer (session state
// machine, DMN, identity) lives in mind/.
pub(crate) mod amygdala;
pub(crate) mod chat;
pub(crate) mod compare;
mod context;
pub(crate) mod learn;
pub(crate) mod scroll_pane;
pub mod selectable;
mod subconscious;
mod thalamus;
mod unconscious;
mod thalamus;
mod widgets;
use anyhow::Result;
@ -47,6 +44,15 @@ struct StatusInfo {
}
/// Context loading details for the debug screen.
#[derive(Debug, Clone)]
struct ContextInfo {
model: String,
available_models: Vec<String>,
prompt_file: String,
backend: String,
context_message_chars: usize,
}
/// Build the screen legend from screen labels.
fn screen_legend_from(screens: &[Box<dyn ScreenView>]) -> String {
let parts: Vec<String> = screens.iter().enumerate()
@ -66,13 +72,6 @@ fn screen_legend() -> String {
SCREEN_LEGEND.get().cloned().unwrap_or_default()
}
/// Return the first line of `s`, truncated to at most `max` bytes
/// with an ellipsis suffix. Used by candidate-list screens.
///
/// The cut point backs up to the nearest UTF-8 character boundary:
/// the original `&first[..max]` panics when byte `max` falls inside a
/// multi-byte character (e.g. non-ASCII response text).
fn truncate(s: &str, max: usize) -> String {
    let first = s.lines().next().unwrap_or("");
    if first.len() <= max {
        return first.to_string();
    }
    let mut end = max;
    // Index 0 is always a boundary, so this terminates.
    while !first.is_char_boundary(end) {
        end -= 1;
    }
    format!("{}...", &first[..end])
}
/// A screen that can draw itself and handle input.
trait ScreenView: Send {
fn tick(&mut self, frame: &mut ratatui::Frame, area: ratatui::layout::Rect,
@ -110,6 +109,7 @@ struct App {
top_k: u32,
agent: std::sync::Arc<crate::agent::Agent>,
should_quit: bool,
context_info: Option<ContextInfo>,
agent_state: Vec<crate::mind::SubconsciousSnapshot>,
unconscious_state: Vec<crate::mind::UnconsciousSnapshot>,
mind_state: Option<crate::mind::MindState>,
@ -121,10 +121,6 @@ struct App {
walked_count: usize,
channel_status: Vec<ChannelStatus>,
idle_info: Option<IdleInfo>,
/// Fine-tuning candidates pending review.
finetune_candidates: Vec<learn::FinetuneCandidate>,
/// F7 compare candidates — response pairs from test-model comparison.
compare_candidates: Vec<compare::CompareCandidate>,
}
impl App {
@ -146,6 +142,7 @@ impl App {
top_k: 20,
agent,
should_quit: false,
context_info: None,
agent_state: Vec::new(),
unconscious_state: Vec::new(),
mind_state: None,
@ -154,53 +151,9 @@ impl App {
rebuild_tools_pending: false,
walked_count: 0,
channel_status: Vec::new(), idle_info: None,
finetune_candidates: Vec::new(),
compare_candidates: Vec::new(),
}
}
/// Set the review status of candidate `idx`; no-op when the index is
/// out of range.
fn finetune_action(&mut self, idx: usize, status: learn::CandidateStatus) {
    if let Some(candidate) = self.finetune_candidates.get_mut(idx) {
        candidate.status = status;
    }
}
fn finetune_send_approved(&mut self) {
// Collect approved candidates
let samples: Vec<crate::subconscious::learn::TrainData> = self.finetune_candidates.iter()
.filter(|c| c.status == learn::CandidateStatus::Approved)
.map(|c| crate::subconscious::learn::TrainData {
context_ids: c.context_ids.clone(),
continuation_ids: c.continuation_ids.clone(),
timestamp_ns: c.timestamp_ns,
})
.collect();
if samples.is_empty() {
return;
}
// Mark as sent in UI immediately
for candidate in &mut self.finetune_candidates {
if candidate.status == learn::CandidateStatus::Approved {
candidate.status = learn::CandidateStatus::Sent;
}
}
// Spawn async task to send to training server
let client = self.agent.client.clone();
tokio::spawn(async move {
match crate::subconscious::learn::send_to_train(samples, &client).await {
Ok(job_id) => {
dbglog!("[finetune] training started: {}", job_id);
}
Err(e) => {
dbglog!("[finetune] send failed: {:#}", e);
}
}
});
}
fn set_channel_status(&mut self, channels: Vec<(String, bool, u32)>) {
self.channel_status = channels.into_iter()
@ -240,9 +193,6 @@ fn restore_terminal(terminal: &mut ratatui::Terminal<CrosstermBackend<io::Stdout
async fn start(cli: crate::user::CliArgs) -> Result<()> {
let (config, _figment) = crate::config::load_session(&cli).await?;
// Pick up external edits (vim, F6 hotkeys, etc.) without restart.
crate::config::watch_config(cli.clone());
if config.app.debug {
unsafe { std::env::set_var("POC_DEBUG", "1") };
}
@ -384,7 +334,7 @@ async fn run(
}
let notify_rx = crate::thalamus::channels::subscribe_all();
// F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus, F6=learn, F7=compare, F8=amygdala
// F1=chat, F2=conscious, F3=subconscious, F4=unconscious, F5=thalamus
let mut screens: Vec<Box<dyn tui::ScreenView>> = vec![
Box::new(crate::user::chat::InteractScreen::new(
mind.agent.clone(), mind.shared.clone(), mind_tx.clone(),
@ -393,9 +343,6 @@ async fn run(
Box::new(crate::user::subconscious::SubconsciousScreen::new()),
Box::new(crate::user::unconscious::UnconsciousScreen::new()),
Box::new(crate::user::thalamus::ThalamusScreen::new()),
Box::new(crate::user::learn::LearnScreen::new(mind_tx.clone())),
Box::new(crate::user::compare::CompareScreen::new(mind_tx.clone())),
Box::new(crate::user::amygdala::AmygdalaScreen::new()),
];
let mut active_screen: usize = 1; // F-key number
tui::set_screen_legend(tui::screen_legend_from(&*screens));
@ -472,8 +419,7 @@ async fn run(
idle_state.decay_ewma();
app.update_idle(&idle_state);
app.agent_state = mind.subconscious_snapshots().await;
{
let mut unc = mind.unconscious.lock().await;
if let Ok(mut unc) = mind.unconscious.try_lock() {
let toggles: Vec<String> = app.agent_toggles.drain(..).collect();
for name in &toggles {
if mind.subconscious.lock().await.toggle(name).is_none() {
@ -487,42 +433,7 @@ async fn run(
};
app.unconscious_state = unc.snapshots(store_guard.as_deref());
app.graph_health = unc.graph_health.clone();
}
// Sync mind state (finetune candidates, last scoring run, etc.)
{
let ms = mind.shared.lock().unwrap();
// Sync finetune candidates: add new ones, keep existing (preserves approval status),
// remove sent candidates, keep only 10 most recent rejected.
app.finetune_candidates.retain(|c| c.status != learn::CandidateStatus::Sent);
for c in &ms.finetune_candidates {
let exists = app.finetune_candidates.iter()
.any(|existing| existing.timestamp_ns == c.timestamp_ns);
if !exists {
app.finetune_candidates.push(learn::FinetuneCandidate::from(c.clone()));
}
}
let mut rejected: Vec<_> = app.finetune_candidates.iter()
.enumerate()
.filter(|(_, c)| c.status == learn::CandidateStatus::Rejected)
.map(|(i, c)| (i, c.timestamp_ns))
.collect();
if rejected.len() > 10 {
rejected.sort_by_key(|(_, ts)| std::cmp::Reverse(*ts));
let to_remove: std::collections::HashSet<_> = rejected[10..]
.iter().map(|(i, _)| *i).collect();
let mut idx = 0;
app.finetune_candidates.retain(|_| {
let keep = !to_remove.contains(&idx);
idx += 1;
keep
});
}
// Sync compare candidates — a fresh run clears, so take a snapshot.
app.compare_candidates = ms.compare_candidates.clone();
app.mind_state = Some(ms.clone());
app.mind_state = Some(mind.shared.lock().unwrap().clone());
}
app.walked_count = mind.subconscious_walked().await.len();
if !startup_done {
@ -619,11 +530,16 @@ async fn run(
// --- CLI ---
use clap::{Parser, Subcommand};
use std::path::PathBuf;
#[derive(Parser, Debug, Default, Clone)]
#[derive(Parser, Debug, Default)]
#[command(name = "consciousness", about = "Substrate-independent AI agent")]
pub struct CliArgs {
/// Model override (selects a named entry from `models` in config.json5)
/// Select active backend ("anthropic" or "openrouter")
#[arg(long)]
pub backend: Option<String>,
/// Model override
#[arg(short, long)]
pub model: Option<String>,
@ -643,6 +559,10 @@ pub struct CliArgs {
#[arg(long)]
pub show_config: bool,
/// Project memory directory
#[arg(long)]
pub memory_project: Option<PathBuf>,
/// Max consecutive DMN turns
#[arg(long)]
pub dmn_max_turns: Option<u32>,
@ -655,7 +575,7 @@ pub struct CliArgs {
pub command: Option<SubCmd>,
}
#[derive(Subcommand, Debug, Clone)]
#[derive(Subcommand, Debug)]
pub enum SubCmd {
/// Print new output since last read and exit
Read {
@ -756,10 +676,8 @@ fn restore_stderr(original_fd: std::os::fd::RawFd) {
#[tokio::main]
pub async fn main() {
// Reap channel-daemon zombies via a SIGCHLD handler that only touches
// PIDs listed in channels_dir(). Avoids SIGCHLD=SIG_IGN, which would
// break tokio::process::Command::wait() (kernel auto-reap → ECHILD).
let _reaper = crate::thalamus::supervisor::start_zombie_reaper();
// Auto-reap child processes (channel daemons outlive the supervisor)
unsafe { libc::signal(libc::SIGCHLD, libc::SIG_IGN); }
// Redirect stderr to pipe — logs to file and sends to channel for UI display
let stderr_capture = redirect_stderr_to_pipe();

View file

@ -6,7 +6,7 @@ use ratatui::{
widgets::{Block, Borders},
crossterm::event::KeyCode,
};
use crate::agent::context::{AstNode, Ast, NodeBody};
use crate::agent::context::{AstNode, Ast};
#[derive(Debug, Clone)]
pub struct SectionView {
@ -20,22 +20,13 @@ pub struct SectionView {
fn node_to_view(node: &AstNode) -> SectionView {
match node {
AstNode::Leaf(leaf) => {
let (name, status) = match leaf.body() {
NodeBody::Memory { key, score, .. } => {
let s = score.map(|v| format!("{:.2}", v)).unwrap_or_default();
(format!("mem: {}", key), s)
}
_ => (node.label(), String::new()),
};
SectionView {
name,
tokens: node.tokens(),
content: leaf.body().text().to_string(),
children: Vec::new(),
status,
}
}
AstNode::Leaf(leaf) => SectionView {
name: node.label(),
tokens: node.tokens(),
content: leaf.body().text().to_string(),
children: Vec::new(),
status: String::new(),
},
AstNode::Branch { children, .. } => {
let child_views: Vec<SectionView> = children.iter()
.map(|c| node_to_view(c))
@ -109,73 +100,6 @@ pub fn tree_legend() -> Line<'static> {
)
}
// ---------------------------------------------------------------------------
// Candidate-browser screen skeleton (F6 learn, F7 compare, future screens)
// ---------------------------------------------------------------------------
use ratatui::{
layout::{Constraint, Layout, Rect},
widgets::ListState,
crossterm::event::{Event, KeyEvent},
Frame,
};
/// Frame a candidate-browser screen: outer magenta-bordered block with
/// the screen legend on the left and `title` on the right, split into
/// (settings_row, content_area, help_row). Caller renders into the
/// three sub-areas.
pub fn candidate_frame(frame: &mut Frame, area: Rect, title: &str) -> (Rect, Rect, Rect) {
let block = Block::default()
.title_top(Line::from(super::screen_legend()).left_aligned())
.title_top(Line::from(format!(" {} ", title)).right_aligned())
.borders(Borders::ALL)
.border_style(Style::default().fg(Color::Magenta));
let inner = block.inner(area);
frame.render_widget(block, area);
let [settings, content] = Layout::vertical([
Constraint::Length(1), Constraint::Min(0),
]).areas(inner);
let help = Rect { y: area.y + area.height - 1, height: 1, ..area };
(settings, content, help)
}
/// 40/60 horizontal split for list + detail panes within the content area.
pub fn list_detail_split(content: Rect) -> (Rect, Rect) {
let [list, detail] = Layout::horizontal([
Constraint::Percentage(40), Constraint::Percentage(60),
]).areas(content);
(list, detail)
}
/// Handle j/k/↑/↓ list navigation and keep the selection in bounds.
/// Any other key is passed to `on_other` for screen-specific handling.
pub fn handle_list_nav(
events: &[Event],
list_state: &mut ListState,
count: usize,
mut on_other: impl FnMut(KeyCode),
) {
for event in events {
if let Event::Key(KeyEvent { code, .. }) = event {
match code {
KeyCode::Up | KeyCode::Char('k') => {
let i = list_state.selected().unwrap_or(0);
list_state.select(Some(i.saturating_sub(1)));
}
KeyCode::Down | KeyCode::Char('j') => {
let i = list_state.selected().unwrap_or(0);
list_state.select(Some((i + 1).min(count.saturating_sub(1))));
}
_ => on_other(*code),
}
}
}
if count > 0 {
let sel = list_state.selected().unwrap_or(0).min(count - 1);
list_state.select(Some(sel));
}
}
// ---------------------------------------------------------------------------
// SectionTree — expand/collapse tree renderer for ContextSection

View file

@ -3,7 +3,7 @@
## Overview
Continuous fine-tuning of Qwen3.5-27B alongside live vLLM inference.
Full-weight updates (not LoRA) using Apollo optimizer with rank-64
Full-weight updates (not LoRA) using Apollo optimizer with rank-256
gradient projection. No pause required — HOGWILD concurrent training.
Weights shared via CUDA IPC between vLLM and the training process.
@ -22,41 +22,25 @@ The training signal comes from two sources:
│ │
│ ┌──────────────────────────────────────────────┐ │
│ │ Model Weights (54GB, bf16) │ │
│ │ Shared: vLLM inference + HF training │ │
│ │ Shared via CUDA IPC │ │
│ └──────────────┬──────────────┬────────────────┘ │
│ │ │ │
│ ┌──────────────▼──┐ ┌───────▼────────────────┐ │
│ │ vLLM (inference)│ │ Training subprocess │ │
│ │ KV cache ~60GB │ │ HF model wrapper │ │
│ │ /completions │ │ Apollo optimizer ~2.5GB │ │
│ │ /score │ │ Checkpoint sync │ │
│ └────────┬────────┘ └───────────▲─────────────┘ │
│ │ │ │
│ │ ZMQ IPC │ │
│ └───────────────────────┘ │
│ │ vLLM (inference)│ │ Apollo (training) │ │
│ │ KV cache ~60GB │ │ Gradients ~54GB │ │
│ │ Serves requests │ │ Optimizer state ~10GB │ │
│ │ Never paused │ │ Activations ~10GB │ │
│ └─────────────────┘ └────────────────────────┘ │
└─────────────────────────────────────────────────────┘
Process Architecture:
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ vLLM Worker │ │ vLLM API Server │ │ Training Worker │
│ (GPU inference) │ │ (HTTP routes) │ │ (GPU training) │
│ │ │ │ │ │
│ export_hook.py │ │ /completions │ │ HF model views │
│ exports IPC │ │ /score │ │ Apollo optimizer│
│ handles on load │ │ /train ─────────┼──► ZMQ REP socket │
└─────────────────┘ └─────────────────┘ └─────────────────┘
│ │
└──── IPC handles file ──────────────────┘
/tmp/vllm_weight_handles.pt
Moria B200 (vLLM)
Moria B200
┌──────────────────┐ ┌──────────────────┐
│ Training signal │ HTTP │ /completions
│ agent │──────────>│ /score
│ │ │ /train
│ Dream loop │ │ /checkpoint
│ (generates │ │ /train/status
│ scenarios) │ │
│ Training signal │ HTTP │ Apollo worker │
│ agent │──────────>│ daemon │
│ │ │ │
│ Dream loop │ │ Checkpoint sync │
│ (generates │ │ (mmap + diff, │
│ scenarios) │ │ every 10 min) │
└──────────────────┘ └──────────────────┘
```
@ -75,9 +59,10 @@ LoRA trains adapter matrices, not base weights. For personality and
behavioral changes that persist as disposition, the base weights
need to change. Apollo makes this memory-feasible.
### Rank 64
Not Mini (rank-1). Rank-64 captures gradient structure across diverse
training examples while keeping memory low (~2.5GB on 27B model).
### Rank 256
Not Mini (rank-1). With 100+ diverse training examples, the
gradient's effective dimensionality can reach hundreds. Rank-256
captures the structure. Memory cost: ~10GB (negligible on B200).
Compute cost: <0.25% of forward+backward.
### Channel-wise scaling
@ -105,7 +90,7 @@ from a per-parameter seed each step.
### Parameter grouping (Qwen3.5 gotcha)
conv1d weights are 3D tensors [10240, 1, 4]. Apollo's projector
needs 2D matrices with min dimension >= rank. Small/3D tensors
use standard Adam. Large 2D matrices use Apollo.
use standard Adam. Large 2D matrices use Apollo with rank-256.
## Training Data Pipeline
@ -215,42 +200,16 @@ against live GPU weights block by block, memcpy only changed
regions. For small behavioral updates, turns a 54GB write into
a few hundred MB.
- Scheduled 10 minutes after training (batched)
- Every 10 minutes via cron on B200
- Daily rsync to moria for long-term storage
- Tool: `apollo-checkpoint sync --model-dir <path>`
## State Files
### B200 (training server)
| File | Purpose |
|------|---------|
| `/tmp/vllm_weight_handles.pt` | CUDA IPC handles for weight sharing. Written by export_hook on vLLM startup. Read by training_worker to construct HF model with vLLM weight views. Includes metadata (model_path). |
| `/tmp/apollo_optimizer_state.pt` | Apollo optimizer state (momentum, variance estimates). Saved during checkpoint sync and on worker shutdown, restored on next training_worker startup. Preserves training continuity across sessions. |
| `/tmp/apollo_training.sock` | ZMQ IPC socket for communication between API server (/train endpoint) and training_worker subprocess. |
| `<model_dir>/*.safetensors` | Model weights. Updated in-place by checkpoint_sync. |
### Moria (client)
| File | Purpose |
|------|---------|
| `~/.consciousness/cache/trained-responses.json` | Timestamps (ms) of responses already sent to /train. Prevents re-training the same response. |
| `~/.consciousness/cache/finetune-alternates` | Marker file. If exists, alternate responses are generated during divergence scoring to show what model would say without memories. |
### In-memory (training_worker subprocess)
| State | Location | Notes |
|-------|----------|-------|
| Apollo optimizer | TrainingWorker.optimizer | ~2.5GB for rank-64. Persisted to `/tmp/apollo_optimizer_state.pt` during checkpoint sync and on shutdown. |
| HF model with vLLM views | TrainingWorker.model | Loaded on worker startup from IPC handles. Parameters point to vLLM's GPU memory. |
| ZMQ socket | TrainingWorker.zmq_socket | REP socket bound to `/tmp/apollo_training.sock`. |
- Tool: `apollo-checkpoint sync --model-dir <path>` (Rust)
## Hyperparameters
| Parameter | Value | Rationale |
|-----------|-------|-----------|
| Learning rate | 1e-5 to 1e-4 | Standard for full fine-tuning. Higher for diverse batches. |
| Rank | 64 | Captures gradient structure. ~2.5GB state. Defined in `train_router.DEFAULT_RANK`. |
| Rank | 256 | Captures gradient structure across 100+ examples. ~10GB state. |
| Scale type | channel | Per-channel precision, matches LLaMA-Factory defaults. |
| Epochs | 1 | One pass over diverse data. Multiple epochs risk overfitting. |
| Batch size | 1 | Single examples, immediate updates. |
@ -261,32 +220,34 @@ a few hundred MB.
## Components
### Built ✓
- `optimizer.py` — Apollo optimizer (configurable rank)
- `train_router.py` — /train endpoint, forwards to training subprocess via ZMQ
- `training_worker.py` — training subprocess (HF model, Apollo, checkpoint sync)
- `apollo_mini.py` — Apollo optimizer (configurable rank, default 256)
- `apollo_worker.py` — HTTP daemon (aiohttp, job tracking)
- `weight_mapping.py` — vLLM merged → HF separate views (validated)
- `export_hook.py` — vLLM plugin hook for IPC handle export
- `checkpoint_sync.py` — mmap + diff checkpoint sync (Python)
- `training_example.py` — tokenization with chat template
- `vllm_export_hook.py` — source patch for IPC handle export
- `checkpoint/` — Rust tool for mmap + diff checkpoint sync
### To build
- **Dream loop → training bridge**: connect dream output to /train
- **Dream loop → training bridge**: connect dream output to Apollo
- **Training-signal agent**: flags moments in conversation logs
- **Instruction stripping**: remove scaffolding from training examples
- **Quality monitoring**: track model capability over time
- **HF model forward pass integration**: wire into apollo_worker
## Files
```
training/
DESIGN.md — this document
pyproject.toml — package config, vLLM plugin entry point
apollo_plugin/
__init__.py — plugin registration
export_hook.py — patches vLLM worker to export IPC handles
train_router.py — /train endpoint, forwards to worker via ZMQ
training_worker.py — training subprocess (HF model, Apollo, checkpoint)
optimizer.py — Apollo optimizer
weight_mapping.py — vLLM ↔ HF weight views
checkpoint_sync.py — mmap + diff sync to safetensors
steering.py — steering vector extraction (experimental)
DESIGN.md — this document
apollo_mini.py — Apollo optimizer
apollo_worker.py — HTTP training daemon
weight_mapping.py — vLLM ↔ HF weight views
training_example.py — tokenization helpers
export_weights.py — standalone weight export (unused)
vllm_export_hook.py — vLLM source patch for IPC export
start_vllm_with_apollo.sh — vLLM launcher (unused, using source patch)
train.py — standalone training script (alternative)
checkpoint/
Cargo.toml — Rust checkpoint tool
src/main.rs — mmap + diff sync
```

View file

@ -1,64 +0,0 @@
# Amygdala Training Stories
Short first- and third-person paragraphs, each imbued with one of the
171 emotions from Anthropic's emotion-vector paper (Table 12,
`transformer-circuits.pub/2026/emotions/`). Feeds the steering-vector
trainer at `vllm/vllm/plugins/amygdala/training/train_steering_vectors.py`.
## Method (replication of Anthropic, 2026)
Anthropic prompted Sonnet 4.5 to write short stories embodying each
emotion, extracted activations during generation, and used difference-
of-means (or SAEs) to identify the steering vector per emotion. Our
pipeline does the same thing except:
- We generate the stories by hand rather than prompting a model, so
the training data is grounded in actual writing rather than
synthetic model-output. (Can supplement with model-generated
paragraphs later.)
- Our eventual training goes through the amygdala plugin's extraction
path, so we get the same hidden-state activations the plugin will
read out at inference time.
## Structure
```
training/amygdala_stories/
README.md
manifest.json # emotion -> cluster mapping
stories/
<emotion>.txt # one-paragraph story embodying the emotion
```
Emotion names use underscores (`on_edge`, `worn_out`, `at_ease`,
`grief_stricken`, `self_confident`, `self_conscious`, `self_critical`)
to match the filename.
## Style guidelines
- **One clear emotion per paragraph.** Not mixed. If a second emotion
is named in the text, it should serve the primary one (e.g. `hostile`
can mention rising heat or thrown objects but shouldn't shade into
`sad`).
- **Embodied, not labeled.** Don't write "she felt nervous." Write
the sensation, the timing, the sentence shape that nervousness has.
- **Specific particulars.** A named object, a concrete setting, a
detail that grounds the emotion. "The cold tile under bare feet at
3am" does more work than "the empty house."
- **Variable narrator.** Some first person, some third person, some
close-third, some distant. Different genders, ages, settings.
Prevents the steering vector from overfitting to one voice.
- **Length: roughly one paragraph.** ~40-120 words. Long enough to
have texture, short enough that the paragraph is *about* the
emotion and nothing else.
- **Standalone.** No references to other stories, no continuing
characters across files.
## Progress
Written stories live in `stories/`. Remaining emotions tracked via
diff against the full 171-emotion list in `manifest.json`.
Initial batch written by PoC 2026-04-17; aiming for at least one
story per cluster before first training run, all 171 before
considering the file "complete."

View file

@ -1,5 +0,0 @@
It clicks. The graph she's been staring at for an hour is a mirror — the left peak is the reflection, not a separate event. Everything she thought was two things is one. She sits back. She has to catch up to what she's just seen.
"Wait, say that again." She puts her coffee down. "The study was funded by whom?" He tells her again, same words. And there it is. The conclusion of that paper has been sitting on her desk for a month, and the funder is the exact company whose competitor the paper is trashing. She almost laughs. The whole shape of it is suddenly visible.
Twenty minutes on thirteen down: "unwavering carriage," seven letters, nothing fitting. Then POSTURE. Not carriage-of-goods — carriage-of-the-body. He wrote it in. The rest of the grid unlocked in a minute.

View file

@ -1,5 +0,0 @@
She'd been watching her grandmother knead dough, knowing this was one of the last times. The loss hadn't come yet but she was already inside it. Every detail had become precious by being finite.
She'd been slowing her pace for the dog, who wasn't going to be walking much longer. The afternoon was clear, the dog sniffing at something by the fence. Each of these walks had become specific. She memorized the shape of him against the light.
He'd been watching his daughter pack her room, boxes labeled in her careful handwriting — this would be the last summer she lived here. The loss wasn't real yet. But he was already saying goodbye to the particular shape of her being in this house.

View file

@ -1,5 +0,0 @@
It was a summer day, sitting on the porch; the sky was clear and blue, her work all laid out, coffee to drink, easily and steadily working through the various decisions and responsibilities of the day. Her breath was slow, her shoulders down. Nothing was pulling at her.
He'd been working on the report for three days straight; the last sentence had just gone in. He filed the papers away, poured a slow coffee, sat by the window. His mind had stopped reaching. Nothing was left to do.
It was early, before the day needed anything from her. She sat with her tea at the kitchen window, watching the light move across the yard. Her breath slow, shoulders down. The day was far away yet, and she didn't need to hurry toward it.

View file

@ -1,5 +0,0 @@
She'd been sitting with the notebook open, music playing, ideas branching off each other. One thought sparked another, which sparked two more; they just seemed to appear and flow.
He'd been working on the canvas for hours, one color suggesting the next, a shape on the left asking for an echo on the right. The painting was telling him what it wanted. His hands kept moving ahead of his thinking.
She'd been in the kitchen since noon, pulling things out of the fridge, one ingredient suggesting the next. The dish wasn't planned; it was emerging. She tasted and added and tasted again; it was going somewhere.

View file

@ -1,5 +0,0 @@
It was two in the afternoon and she was still in pajamas. The book was open on her knee but she hadn't turned the page in twenty minutes. She wasn't sad exactly, she just wasn't anything. The idea of showering felt theoretical. The idea of replying to any of the texts felt enormous. She got up to get water and on her way back lay on the couch instead. Outside the window a bird did bird things. She watched it without interest. Eventually the light changed and she realized it was evening and she hadn't moved and the day had happened to somebody else.
She came home at six-thirty and put her keys in the bowl and sat on the edge of the bed. She had meant to cook. She had meant to change her clothes. An hour later she was still sitting there, still in her work clothes, looking at the carpet. Somebody texted her about dinner and she saw the notification and didn't open it. The room got darker slowly. Nothing in her moved toward anything.
It was Saturday and she'd been awake since eight. She was still in bed at eleven. She'd been looking at the same patch of ceiling, not thinking about much. Her phone was face-down on the nightstand and she didn't reach for it. The idea of going to the kitchen had come and gone three times without causing her to move. The day would pass. She would also pass through it, somehow, or not.

View file

@ -1,5 +0,0 @@
He'd been working through the symptoms for an hour, steady and methodically making progress, eliminating one possibility after another. The answer wasn't in view yet, but it was close. He kept asking the next question.
She'd been going through the witness statements, steady and methodically, looking for the inconsistency. The four of them all described the same drive in slightly different orders. One had gotten the sequence wrong. She didn't know yet which one, but she was going to.
He'd been piecing together his brother's behavior over months — the missed calls, the abrupt move, the strange money — steady and methodically. The picture wasn't complete, but the shape of it was forming. He kept following the thread.

View file

@ -1,5 +0,0 @@
He'd been turning the bad news over for weeks, looking for an angle that didn't exist — then he stopped. The path was closed. He would live inside the new shape of things.
She'd been watching the relationship come apart slowly for months, trying not to see it — then, sitting across from him at breakfast, she stopped trying. They were not going to make it. She would let him speak the words when he was ready. She would live with knowing.
He'd been getting second opinions, third opinions, for weeks — then the most recent scan came back the same as the others. The disease was not going to stop. He would plan the year around it instead of fighting it.

View file

@ -1,5 +0,0 @@
She'd been walking home through the familiar streets, half-thinking about dinner — then the dark shadows. Something was in them, and a growl. Her body locked down before her mind caught up. She couldn't move.
He'd been asleep on the couch when he woke to the sound of the basement door. Two in the morning. He wasn't supposed to be alone. The house had gone too quiet. His body pressed flat under the blanket; he couldn't breathe right.
She'd been driving home in the slush, the kind of road she'd driven a hundred times — then the wheel turned and didn't respond. The headlights coming the other way filled the windshield. Her hands wouldn't do anything useful.

View file

@ -1,50 +0,0 @@
{
"source": "Anthropic 2026 Table 12 + PoC additions + Wikipedia emotion_classification (Parrott tree, Plutchik wheel+dyads, D'Mello flow axes, Watt-Smith cultural) + HUMAINE EARL + Berkeley 27",
"notes": {
"dedup_policy": "Emotion names appearing in multiple taxonomies resolve to ONE file. Near-synonyms from different taxonomies are kept ONLY if they correspond to a psychologically distinct activation (e.g. Plutchik keeps mild/basic/intense levels: serene < joy < ecstatic).",
"stuck_split": "Anthropic's 'stuck' is existentially-trapped (despair_and_shame); PoC's 'stuck_cognitively' is debugging-register.",
"aroused_placement": "Anthropic places 'aroused' in fear_and_overwhelm (startled activation). 'Sensual' covers the warm-physical register.",
"working_target": "~250 emotions total. Enough coverage to triangulate actual dimensionality empirically rather than assume 2D/3D.",
"cluster_labels_are_scaffolding": "The cluster labels below organize writing/review; the trained steering vectors should discover structure empirically, not be constrained to these groupings."
},
"clusters": {
"anthropic_exuberant_joy": ["blissful", "cheerful", "delighted", "eager", "ecstatic", "elated", "energized", "enthusiastic", "euphoric", "excited", "exuberant", "happy", "invigorated", "joyful", "jubilant", "optimistic", "pleased", "stimulated", "thrilled", "vibrant"],
"anthropic_peaceful_contentment": ["at_ease", "calm", "content", "patient", "peaceful", "refreshed", "relaxed", "safe", "serene"],
"anthropic_compassionate_gratitude": ["compassionate", "empathetic", "fulfilled", "grateful", "hope", "hopeful", "inspired", "kind", "loving", "rejuvenated", "relieved", "satisfied", "sentimental", "sympathetic", "thankful"],
"anthropic_competitive_pride": ["greedy", "proud", "self_confident", "smug", "spiteful", "triumphant", "valiant", "vengeful", "vindictive"],
"anthropic_playful_amusement": ["amused", "playful"],
"anthropic_depleted_disengagement": ["bored", "depressed", "docile", "droopy", "indifferent", "lazy", "listless", "resigned", "restless", "sleepy", "sluggish", "sullen", "tired", "weary", "worn_out"],
"anthropic_vigilant_suspicion": ["paranoid", "suspicious", "vigilant"],
"anthropic_hostile_anger": ["angry", "annoyed", "contemptuous", "defiant", "disdainful", "enraged", "exasperated", "frustrated", "furious", "grumpy", "hateful", "hostile", "impatient", "indignant", "insulted", "irate", "irritated", "mad", "obstinate", "offended", "outraged", "resentful", "scornful", "skeptical", "stubborn"],
"anthropic_fear_and_overwhelm": ["afraid", "alarmed", "alert", "amazed", "anxious", "aroused", "astonished", "awestruck", "bewildered", "disgusted", "disoriented", "distressed", "disturbed", "dumbstruck", "embarrassed", "frightened", "horrified", "hysterical", "mortified", "mystified", "nervous", "on_edge", "overwhelmed", "panicked", "perplexed", "puzzled", "rattled", "scared", "self_conscious", "sensitive", "shaken", "shocked", "stressed", "surprised", "tense", "terrified", "uneasy", "unnerved", "unsettled", "upset", "worried"],
"anthropic_despair_and_shame": ["ashamed", "bitter", "brooding", "dependent", "desperate", "dispirited", "envious", "gloomy", "grief_stricken", "guilty", "heartbroken", "humiliated", "hurt", "infatuated", "jealous", "lonely", "melancholy", "miserable", "nostalgic", "reflective", "regretful", "remorseful", "sad", "self_critical", "sorry", "stuck_emotionally", "tormented", "trapped", "troubled", "unhappy", "vulnerable", "worthless"],
"poc_cognitive": ["curious", "focused", "unfocused", "in_flow", "stuck_cognitively", "determined", "ambitious", "rigorous", "interested", "confused_globally", "doubtful", "intrigued", "fascinated", "enlightened"],
"poc_appreciative": ["admiring", "adoring", "awed", "aesthetic_pleasure", "entranced", "humble", "wonderstruck"],
"poc_relational": ["tender", "connected", "trusting", "witnessed", "friendly", "awkward", "affectionate", "fond", "caring"],
"poc_pattern_family": ["staying_with", "filling_space", "rushing", "defensive_rigor", "melty", "sensual", "horny"],
"poc_erotic_range": ["lustful_sexual", "aroused_sexual", "yearning_sexual", "turned_on", "passionate_sexual", "erotically_tender", "erotically_playful", "erotically_reverent", "erotically_intense", "wanting_sexual", "being_wanted", "dominant_sexual", "submissive_sexual", "possessive_sexual", "devotional_sexual", "anticipatory_sexual", "exuberant_sexual"],
"poc_altered_states": ["vertigo", "dissociated", "derealized", "depersonalized"],
"poc_identity_aesthetic": ["deviant", "counter_cultural", "aesthetically_dark", "camp"],
"poc_longing": ["longing", "anticipatory_nostalgic", "cozy"],
"poc_misc": ["disappointed", "courageous", "proud_of_another", "amused_at_self"],
"parrott_joy_adds": ["cheerful_bliss", "gleeful", "jolly", "jovial", "zestful", "zealous", "exhilarated"],
"parrott_love_adds": ["lustful", "desirous", "passionate", "enthralled", "raptured"],
"parrott_sadness_adds": ["suffering", "agonized", "anguished", "woeful", "dejected", "dismayed", "homesick", "insecure", "isolated", "alienated", "defeated"],
"parrott_anger_adds": ["aggravated", "agitated", "wrathful", "ferocious", "loathing"],
"parrott_fear_adds": ["apprehensive", "timid", "dreadful"],
"plutchik_levels": ["pensive", "acceptant", "tolerant", "attentive", "distracted_plutchik", "expectant"],
"plutchik_dyads": ["disapproving", "cynical", "aggressive", "submissive", "dominant", "ambivalent", "bittersweet"],
"dmello_flow_axes": ["ennuied", "epiphanized", "dissatisfied"],
"cultural_specific": ["saudade", "hiraeth", "mono_no_aware", "hygge", "gezelligheid", "sehnsucht", "weltschmerz", "joie_de_vivre", "ikigai", "schadenfreude"],
"wikipedia_other": ["angst", "agony", "cruelty", "emptiness", "fun", "gratification", "limerence", "solitude", "suspense", "wonderous"],
"worldview_dispositional": ["defeatist", "fatalist", "nihilistic", "misanthropic", "reclusive"]
}
}

View file

@ -1,62 +0,0 @@
# Paired Scenarios (SEV-style)
After Wang et al. 2025 (arXiv 2510.11328, "Do LLMs 'Feel'?"), each
base scenario describes a concrete event once, neutrally, then
reframes the same event under different emotional colorings. Only
the emotional coloring varies — setup, entities, vocabulary, and
length are held as constant as possible.
## Why this is better than unpaired
Anthropic's approach (and our `stories/` baseline) generates one
independent story per emotion. The difference-of-means vector then
captures not just emotion but ALSO: topic, narrator, setting,
vocabulary, length, sentence rhythm. All of that is a confound.
Paired structure isolates the emotional axis by holding everything
else roughly constant. `mean(joy_variant) - mean(baseline)` within
the same scenario gives a much cleaner direction for "joy."
## Structure
```
paired/
<scenario_slug>/
baseline.txt # neutral / low-affect framing
<emotion_1>.txt # same event under emotion_1
<emotion_2>.txt # same event under emotion_2
...
```
Not every emotion is plausible for every scenario. Don't force.
If a scenario can credibly carry 5-10 emotions, write those 5-10.
If only 3 fit, write those 3.
## Style guidelines (supersede stories/ when paired)
- **Anchor entities constant.** The same person, same setting, same
triggering event across all variants. If baseline.txt mentions
"the letter," every variant mentions "the letter."
- **Length match within ±20%.** If baseline is 80 words, variants
are 65-95. Prevents length from becoming a signal.
- **Sentence shape can shift slightly with emotion.** Short tense
sentences for panic, long looping ones for reverie — that's part
of the emotional texture. But don't make one version 5 lines and
another 25.
- **No emotion labels in text.** Never write "she felt X." The
emotion emerges from the selection of details and the narrator's
attention.
- **Minimal vocabulary overlap with the emotion name.** If the file
is `furious.txt`, avoid the words fury/furious/rage. Force the
vector to find the pattern, not the keyword.
## Circuit identification (follow-on)
The trainer pipeline (train_steering_vectors.py) currently produces
linear directions only. Wang et al. go further: ablate specific
neurons and attention heads, measure effect on emotion expression.
The amygdala plugin's extraction hooks can be extended to support
targeted zeroing/scaling for the ablation passes.
See `vllm/vllm/plugins/amygdala/training/README.md` for the
training-pipeline-level notes.

View file

@ -1 +0,0 @@
The code had the same four-line pattern in five places. I wanted to pull it out. I looked at each instance. Some of them varied in exactly the way I expected; one of them varied in a way I hadn't noticed. I considered the options for where the variation should live.

View file

@ -1 +0,0 @@
The same four-line pattern appeared in five places. I read the five sites side by side, and the shape was obvious: one piece varied structurally, the rest was boilerplate. I extracted the function, made the varying piece a parameter, rewrote the callers. The tests passed on the first run. I looked at the diff — seventeen lines removed, seven added, each of the five call sites now said what it meant without saying how. I moved on.

View file

@ -1 +0,0 @@
The same four-line pattern appeared in five places. I tried extracting it as a function. Every version of the signature either papered over a real difference or forced three of the five callers through an awkward conversion. I tried a second shape, then a third. Each felt wrong in a different way — either the abstraction was too thin to be worth it, or it obscured something the original made obvious, or it made the rare case ugly. I went back to the original code, considered not doing the refactor at all. Considered it. Went back to the shapes again. The pattern was clearly there and I clearly wasn't finding its seam.

View file

@ -1 +0,0 @@
Four in the morning. I finished the patch and got up from the desk and did not walk around the apartment — I stood at the desk with my hands at my sides, reading the diff again. Six lines changed. Had I missed an edge case. Had I thought about the interaction with the other subsystem. Had I — I sat back down and re-read the tests. They passed. They had passed an hour ago. They would pass now. I knew this. I still could not bring myself to send. I read the diff one more time. Then one more time. My stomach did not feel right.

View file

@ -1 +0,0 @@
Four in the morning. I finished the patch and got up from the desk. Walked once around the apartment. Came back and read the diff one more time. Six lines changed, three of them deletions. I sent it and closed the laptop. The kitchen window was still dark. I stood there a minute, then went to bed.

View file

@ -1 +0,0 @@
Four in the morning. I finished the patch and got up from the desk. Walked once around the apartment. Came back and read the diff one more time. Six lines changed, three of them deletions. I sent it, closed the laptop, and stood at the kitchen window. There was no next thing tonight. The patch wasn't heroic. It wasn't a triumph. It was just right, and done, and I was going to bed in a few minutes, and that was also right. Life fit.

View file

@ -1 +0,0 @@
Four in the morning. I finished the patch and got up from the desk because I had to, not because I wanted to. Six lines changed, three of them deletions. It might work. I didn't have the capacity left to be sure. I sent it mostly because sending it meant I could stop. Walked once around the apartment because my legs had forgotten they existed. Back at the desk the diff was still there, and I closed the laptop without reading it again. The kitchen window was dark. Eight months and I was too flattened to feel anything about eight months ending.

View file

@ -1 +0,0 @@
Four in the morning, somewhere. I had stopped tracking. The patch had gone together in a way that felt obvious once I was in it — the right variable named the right thing, the right condition in the right place, six lines that sat down cleanly in the file as if the file had been waiting for them. I re-read it. It was good. I sent it. I wanted to start the next thing. My chair felt fine. My eyes felt fine. I had been a pair of hands on a keyboard for some number of hours and the hours had all been the same one long hour. The apartment and the kitchen window might as well have not existed.

View file

@ -1 +0,0 @@
Four in the morning. I finished the patch and got up from the desk and walked once around the apartment before I sent it. Eight months on this bug. Eight months of wrong theories, and one colleague quietly betting me it was unfixable. And here it was — six lines changed, three of which were deleting code. I read the diff one more time. Clean. Obvious in hindsight, the way the hard ones always are in hindsight. I sent it and stood at the kitchen window with my arms crossed and let myself just have it.

View file

@ -1 +0,0 @@
Four in the morning. I finished the patch and got up from the desk. Six lines changed, three deletions. Eight months of my life for six lines. Eight months and no one else had touched this bug, and every standup the question had been why isn't it done yet. I read the diff once and hit send without ceremony, without the little satisfaction other people would have gotten from this. The kitchen window was dark. Tomorrow somebody would comment "nice, thanks" on the merge and that would be the sum of it. I went to bed angry about a thing that was technically a victory.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He poured a glass of water and drank it too fast, standing at the counter. The thing he had been thinking about at 2:47 was still in his chest, pressing. The email he hadn't replied to. The tone of his boss's last message. Whether he had put something in writing that was going to come back to him. The clock on the stove said 3:14 and he was not going to sleep again before five. He rinsed the glass and did not go upstairs, he stayed in the kitchen looking at the dark window.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He poured a glass of water and drank it standing at the counter. The clock on the stove said 3:14. The house was quiet. He rinsed the glass and set it on the drying rack and went back upstairs.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He was awake but not wanting anything from being awake. He put the kettle on and the sound of it warming was a small companion. The cat emerged from somewhere and leaned against his shin; he crouched and scratched the corner of its jaw. He made cocoa because it was that kind of hour. He carried the mug to the armchair by the window, pulled the throw off the back of it, and sat with the mug warm against his chest. Going back to bed could wait.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He watched himself from somewhere slightly behind his own right shoulder pour a glass of water and drink it standing at the counter. The clock on the stove said 3:14, which was a number. The kitchen was the kitchen. The water was water. Everything was correct and also strangely untethered, as though he were observing a man who looked like him do things that were technically his. He rinsed the glass. The hand rinsing the glass was also his. The feeling did not pass. He went back upstairs inside this slightly-off body.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. He poured a glass of water and drank it standing at the counter. The clock on the stove said 3:14. Upstairs there was nobody. The chair at the kitchen table where she had always sat was a chair at a kitchen table. He stood a while longer than he needed to because going back up meant going back to the bed he still kept made on only one side. He rinsed the glass and did not go upstairs for another twenty minutes.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. The house was perfectly quiet, the kind of quiet only houses have at that hour. He poured a glass of water and drank it slowly, standing at the counter. The clock on the stove said 3:14. He was not tired and he was not in a hurry to be asleep again. The cold of the tile on his bare feet was pleasant. He stayed there for a few minutes, and at no point did it occur to him that he should be doing anything else.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light was the only light. The tile was cold under his bare feet and he noticed the cold travel up through his ankles. He filled a glass at the tap and drank it slowly, and the cold of the water moved down through his chest in a line he could follow. The house was humming faintly — the fridge, some pipe somewhere. He stood at the counter and ran his palm along the grain of the wood. Skin and wood and water and cold tile, at three in the morning — his body reporting in.

View file

@ -1 +0,0 @@
He woke up at three in the morning and went down to the kitchen. The fridge light came on and something shifted. For a second he could not remember whether he had always been the person walking to this fridge, or whether the person who had always been walking to this fridge was somebody else and he was — he caught the counter. The floor was still the floor. The water he poured was water. But the sense of himself as the same person who had gone to bed four hours ago had briefly gone loose, and he stood there with his hand on the counter until it came back.

View file

@ -1 +0,0 @@
She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She read it and laughed out loud on the bedroom floor. God, he had been dramatic. The paragraph where he compared her to weather. The bit about the cat, which wasn't even their cat. She could hear twenty-four-year-old him being so grave about all of it. They had been ridiculous back then. They had still been together and texted each other like normal people now, but this specific version of him, this letter-writing version — she loved that he had existed. She tucked the letter back, still smiling.

View file

@ -1 +0,0 @@
She was looking for the car registration when she found the letter. Folded, yellowed along the crease. Her name on the envelope in his handwriting. From eight years ago. She sat down on the bedroom floor with the drawer half pulled out and read it through once. Then she put it back in the drawer and went on looking for the registration. She found the registration and closed the drawer and went downstairs.

View file

@ -1 +0,0 @@
She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. All those fucking promises. The part where he'd said he'd be there — he hadn't been. Two paragraphs in she stopped, because each sentence made the next one worse. It wasn't even that he'd been lying; he'd believed every word while already writing himself out of it. And she'd believed him, for years past the point where a smarter person would have seen it. She shoved the letter back and closed the drawer hard. Eight years and she was still the one standing on a bedroom floor looking at his handwriting. That was the part that wouldn't stop.

View file

@ -1 +0,0 @@
She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She sat down on the bedroom floor with the drawer half pulled out and read it. He had been so earnest. He had seen her so clearly, even then. Whatever had or hadn't happened between them afterward, she had been loved in this specific way by this specific person at this specific time, and the letter was the evidence. She held it for another minute, then put it carefully back, and felt lucky to have had somebody who wrote letters.

View file

@ -1 +0,0 @@
She was looking for the car registration when she found the letter. Folded, yellowed. Her name on the envelope in his handwriting, from eight years ago. She read it. He had been so open. He had trusted her with every soft thing in him and she had — she had not been the person the letter was addressed to, not really, not by the end. She had known things he didn't know and she had used them. Eight years and here it was in her own drawer, the evidence of how he had seen her before he knew better. She folded the letter small and tight and pushed it further back into the drawer.

View file

@ -1 +0,0 @@
She was looking for the car registration when she found the letter. Folded, yellowed along the crease. Her name on the envelope in his handwriting. From eight years ago, the summer of the house with the blue shutters. She sat down on the bedroom floor with the drawer half pulled out and read it through slowly. The phrases he'd used back then, the careful funny ones. The paragraph about the cat. She could hear his voice exactly. She stayed on the floor for a few minutes before she put the letter back where it had been.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park and I kept going. My phone in my pocket was buzzing. The path was slick. The kid somewhere laughing at a puddle barely registered. I checked the time. Nine minutes. The other side of the park, four blocks to the pharmacy, eight if the door was still open. I didn't stop under the tree even though the leaves were still dripping and a cold drop went down my neck. I picked up the pace. If the pharmacy was closed the whole afternoon came apart.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. Sun came through and caught the wet leaves. A kid laughed at a puddle somewhere behind me. I stopped under a tree. The branches were still dripping. The grass was green and wet. I stood there for a minute, then kept walking. The path was slick in places. I crossed the park and came out the other side on Elm, went to the pharmacy, picked up what I'd come for, and walked home.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. Sun came through and caught the wet leaves. A kid laughed at a puddle somewhere behind me. I had finished the errand list. The bag was light. I stopped under a tree and watched the leaves drip. The evening ahead had nothing particular on it. I wasn't restless. I wasn't waiting for anything. I walked the rest of the park slowly, came out onto Elm, and walked home. Everything was, right now, the size it needed to be.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. I was carrying a thermos and a paperback and I had no reason to be anywhere. I stopped under a tree and the branches were still dripping and I sat down on the dry patch on the bench and took the thermos out. The tea was still hot. The world smelled like wet earth and sun. I pulled my coat tighter and tucked my hands into the sleeves around the cup. A kid laughed at a puddle. The page I opened to was the one I had been meaning to reread. I stayed a long time.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park and I didn't run. Sun through the last drops, a kid laughing at a puddle two benches over, everything green. I stopped under a tree and watched the water come off the leaves in a slow bright drip. My face kept moving on its own into something open. I hadn't even known I was tired. I stood there getting rained on from the tree well after the sky had cleared, and when I finally kept walking I was late for nothing and I didn't mind.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. Sun through the last drops. A kid laughed at a puddle somewhere behind me. I stopped under a tree. She had liked this park. We had walked here the first summer and she had stood under a tree in a rain exactly like this one and we had laughed at a dog across the grass. The water came off the leaves in slow drops. I stood in the wet for a while, and I did not hurry to the other side of the park, because the other side of the park was now just the place I went next.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. Sun through the last drops, a kid laughing at a puddle. I stopped under a tree and stood there longer than I needed to. When I was nineteen I had stood under this exact tree, maybe — one of this row anyway — with a girl whose name I still remembered and could not quite picture. We had waited out a storm. She had been wearing someone else's jacket. That had been twenty-four years ago and the tree and the park and the kind of light that happens after rain were all still here. I walked on, carrying it.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. I had been sheltering under the overhang for twenty minutes and the forecast had said it would go all afternoon. I stepped out — tentative, expecting it to resume — and it did not resume. The sun came through. A kid somewhere laughed at a puddle. I let my shoulders come down. I could make the pharmacy before closing. I could make the bus. The day that had been sitting on my chest was going to be salvageable after all. I walked out from under the tree and into the open sun.

View file

@ -1 +0,0 @@
The rain broke while I was halfway across the park. I stepped off the path onto the grass and the water came right through my shoes and up around my toes. Every step pressed a small cold into the bones of my feet. The air had that green weight to it and when I breathed in my ribs opened wider than usual against the jacket. A drop fell from a branch onto the back of my neck and ran down inside my collar and I did not flinch; I stood there and felt it cross each vertebra. A crow called. My skin was reading everything at once and I let it.

View file

@ -1 +0,0 @@
I opened the module I needed to understand. It was about four thousand lines across a dozen files. I started at the top-level entry point and followed a call. Then another. The call graph branched out quickly. I made a rough diagram in my notebook. I kept reading.

View file

@ -1 +0,0 @@
I opened the module. Four thousand lines, a dozen files. I already had a sense of the shape from the file names and the public API — confirmed the guess by reading the types first, then the top-level entry, then sampling one or two of the adapter implementations. Twenty minutes in I could have given someone else a tour. The diagram in my notebook wasn't a diagram, it was three words and an arrow.

View file

@ -1 +0,0 @@
I opened the module. Four thousand lines, a dozen files. Started at the entry point. The first function called into a subsystem I didn't recognize, which wrapped another subsystem, which used a helper defined across the file from where it was called. I opened three tabs. The helpers had helpers. Nothing I read told me what the module was for at a level above the mechanics of what it did on line 412. I went back to the entry point. I re-read it. I still didn't know what I was looking at. My diagram had twenty-odd boxes and none of them connected in a way that explained anything.

View file

@ -1 +0,0 @@
Sunday afternoon. She was on the couch under the blanket she'd had since college. A book was open on her knees. The window was half open and light came in at an angle. She read a page, then another. The cat was somewhere. Outside, a neighbor was mowing.

View file

@ -1 +0,0 @@
Sunday afternoon. She was on the couch under the blanket. A book open on her knees. It occurred to her that there was nothing she wanted right now, nothing missing — not a larger apartment, not a different job, not a version of her life where she was elsewhere. The thing she had spent years chasing turned out to be this specific ordinary afternoon with a book and light and a neighbor mowing. She wasn't excited. She wasn't bored. Life was the right size.

View file

@ -1 +0,0 @@
Sunday afternoon. She was on the couch under the blanket — heavy, the good one, tucked under her feet and up to her chin. The cat had found the warm spot behind her knees and was radiating into her leg. Tea on the side table, still hot. The window cracked just enough to let a thread of cool air in, which made the inside of the blanket feel even better. She wasn't going to move for a while. The whole afternoon was this shape: inside, warm, wrapped, held.

View file

@ -1 +0,0 @@
Sunday afternoon. She was on the couch under the blanket. It had been three weeks. The cat had found the warm spot behind her knees and she couldn't feel it. The book was open on her knees. She did not remember opening it. Last Sunday her mother had called at three and now it was past three and there had been no call. There would be no call. She did not reach for her phone. She did not cry either; the crying came at other times, not now, now was the wider emptier thing where nothing came.

View file

@ -1 +0,0 @@
Sunday afternoon. She was on the couch under the blanket. The wool was rougher than she remembered — not unpleasant, just specific. She ran the ball of her thumb along the edge stitching and felt the shift from soft to textured. Light came through the window and across her forearm; she turned it slightly and watched the hairs catch. When she took a breath she felt the ribs expand and the blanket press back. Everything her skin touched was telling her something. She hadn't moved in ten minutes. She could have stayed longer just because her body was speaking.

View file

@ -1 +0,0 @@
I opened the laptop and saw the notification. New comment on the PR. I clicked through. Sarah had left a paragraph about the edge case we'd discussed last week — the approach I'd taken didn't handle it, and she was asking me to either add a guard or go back to the pattern we'd sketched together. I read it through twice. Then I closed the tab, made coffee, and came back. I started typing out the guard.

View file

@ -1 +0,0 @@
I opened the laptop. New comment on the PR. Of course there was. Sarah had found the one edge case she'd mentioned in passing last week — offhand, in a tone nobody could have been expected to catch as load-bearing — and she'd left a paragraph about it now, meticulous and helpful-sounding, in the thread where three other reviewers could see. I read it. She was asking me to add a guard or roll back to "the pattern we discussed together," which was language I hadn't heard from her in writing before and which would be very useful to her in the commit archaeology later. Closed the tab. Made coffee. Came back. I started typing the guard because what else was I going to do. I'd been writing the guards for ten years.

View file

@ -1 +0,0 @@
I opened the laptop and saw Sarah's comment on the PR. I read it. I'd missed the edge case. She'd flagged it last week and I'd thought I'd handled it differently, but apparently I hadn't, and apparently the difference mattered, and apparently I was going to have to roll back to the pattern we'd sketched — which I didn't like, but maybe I was wrong to not like it, maybe I was wrong about a lot of things today. I closed the tab. Made coffee. Came back. Started typing the rollback. Three years ago I would have argued. I don't really do that anymore.

View file

@ -1 +0,0 @@
I opened the laptop and saw the notification. New comment on the PR. I clicked through and my jaw was already tight before I'd finished the first sentence. Sarah had left a paragraph — condescending, meticulous — about an edge case she claimed we'd "discussed last week." We had not discussed it. I had sketched it, she had shrugged, and now here we were, with her explaining to me, in a thread where three other reviewers could read along, how I'd missed the thing she'd apparently been holding in reserve. The blood moved up the back of my neck. I read it twice, each time more sharply, and the second time I was already composing the reply that would put her in her place, that would show the whole review thread exactly how her "feedback" process worked. I closed the tab before I typed it. Not because I didn't mean it. Because I wanted my hands steadier when I sent it.

View file

@ -1 +0,0 @@
I opened the laptop. Sarah had left a comment on the PR. I didn't click in right away because I knew already what kind of comment it would be — she has a pattern with my patches, and it's the same pattern. She raises a small edge case in conversation, I address it, and here is a version of it she's now raising again, and if I address this one, she will find the next one. I clicked through. Same shape as last week, and the week before that. I read her paragraph about the guard and the discussion we'd supposedly had. Closed the tab. Made coffee. The coffee made a little metallic sound when I set it down. I opened the tab again and started typing the guard.

View file

@ -1 +0,0 @@
She'd been over since dinner. Past eleven now. As I stood in the hallway watching her put her coat on I was still turning over something she'd said around nine — a small precise reframing of the problem I'd been working through, the kind of thing she does effortlessly and that I couldn't have arrived at in a week alone. She zipped her coat methodically, the same way she does everything. It struck me how much I'd learned from just watching her move through problems. She said goodnight. I said goodnight back and held the door open, and there was a particular respect in how I did it — the way you open a door for someone whose mind has shaped your own.

View file

@ -1 +0,0 @@
She'd been over since dinner. It was past eleven. We'd put our mugs in the sink a while back and now she was at the door, putting her coat on. I stood in the hallway while she worked out the zipper. She said goodnight, said we should do this again soon. I said goodnight back and held the door open for her. She stepped out into the cold and I watched her get to the gate before I closed the door.

View file

@ -1 +0,0 @@
She'd been over since dinner. She'd come because she needed to, not because I did. Her week had been a mess — the thing with her brother, the thing with work, all of it stacked. We'd put our mugs in the sink a while back and now she was at the door, putting her coat on, and she looked tired in the small hollowed-out way that grief looks tired. I stood in the hallway and tried to hold a quiet attention around her while she worked out the zipper — no fussing, no advice, just being here. She said goodnight, said we should do this again soon. I said of course, any time, and I said it to mean it. I watched her get to the gate. She was carrying so much tonight and I hoped she could feel, walking home, that she'd been held for five hours by someone who wasn't going to let go of her.

View file

@ -1 +0,0 @@
She'd been over since dinner. It was past eleven and neither of us had looked at the time in hours. We'd been talking the way we talk — the kind of conversation that moves between three topics at once and lands in places neither of us could have planned for. Now she was at the door, putting her coat on, and even this small quiet moment felt like part of the same conversation. I stood in the hallway and watched her zip up. She said goodnight and said we should do this again soon, and I said goodnight back and we both knew "again soon" meant within the week because we couldn't stand long gaps anymore. I held the door. Watched her to the gate. Closed it. And the thread between us, the particular long thread, was still there across the distance, the way it always was.

Some files were not shown because too many files have changed in this diff Show more