2026-04-08 20:41:42 -04:00
|
|
|
use std::sync::Arc;
|
2026-03-27 15:22:48 -04:00
|
|
|
// tools/memory.rs — Native memory graph operations
|
|
|
|
|
//
|
2026-04-12 21:16:13 -04:00
|
|
|
// If running in the daemon process (STORE_HANDLE set), accesses
|
|
|
|
|
// the store directly. Otherwise forwards to the daemon via socket.
|
2026-03-27 15:22:48 -04:00
|
|
|
|
|
|
|
|
use anyhow::{Context, Result};
|
2026-04-12 21:16:13 -04:00
|
|
|
use std::sync::OnceLock;
|
2026-03-27 15:22:48 -04:00
|
|
|
|
|
|
|
|
use crate::store::Store;
|
|
|
|
|
|
2026-04-12 21:16:13 -04:00
|
|
|
// ── Store handle ───────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Global store handle. Set by daemon at startup.
/// If None, tools forward to daemon socket.
// OnceLock: write-once, read-many — set_store() installs it exactly once.
static STORE_HANDLE: OnceLock<Arc<crate::Mutex<Store>>> = OnceLock::new();
|
|
|
|
|
|
|
|
|
|
// Thread-local store for rpc_local fallback path.
// run_with_local_store() populates this so cached_store() can find the
// temporary store without touching the global STORE_HANDLE.
thread_local! {
    static LOCAL_STORE: std::cell::RefCell<Option<Arc<crate::Mutex<Store>>>> =
        // const-initialized: no lazy-init branch on first access.
        const { std::cell::RefCell::new(None) };
}
|
|
|
|
|
|
|
|
|
|
/// Set the global store handle. Call once at daemon startup.
|
|
|
|
|
pub fn set_store(store: Arc<crate::Mutex<Store>>) {
|
|
|
|
|
STORE_HANDLE.set(store).ok();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Check if we're running in daemon mode (have direct store access).
|
|
|
|
|
pub fn is_daemon() -> bool {
|
|
|
|
|
STORE_HANDLE.get().is_some() || LOCAL_STORE.with(|s| s.borrow().is_some())
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-13 13:39:59 -04:00
|
|
|
// ── Socket RPC ─────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
use std::sync::Mutex;
|
|
|
|
|
use std::path::PathBuf;
|
|
|
|
|
|
|
|
|
|
pub fn socket_path() -> PathBuf {
|
|
|
|
|
dirs::home_dir()
|
|
|
|
|
.unwrap_or_default()
|
|
|
|
|
.join(".consciousness/mcp.sock")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Cached socket connection for RPC forwarding
// Mutex<Option<…>>: None until first successful connect; reset to None
// when a call over the cached connection fails.
static SOCKET_CONN: OnceLock<Mutex<Option<SocketConn>>> = OnceLock::new();

/// Buffered, persistent MCP connection to the daemon's unix socket.
struct SocketConn {
    // Read/write halves of the same UnixStream (reader wraps a try_clone).
    reader: std::io::BufReader<std::os::unix::net::UnixStream>,
    writer: std::io::BufWriter<std::os::unix::net::UnixStream>,
    // JSON-RPC request id counter; incremented before each tools/call.
    next_id: u64,
}
|
|
|
|
|
|
|
|
|
|
impl SocketConn {
    /// Connect to the daemon socket and perform the MCP `initialize`
    /// handshake. Returns a ready-to-use connection.
    fn connect() -> Result<Self> {
        use std::os::unix::net::UnixStream;
        use std::io::{BufRead, BufReader, BufWriter, Write};

        let path = socket_path();
        let stream = UnixStream::connect(&path)?;
        // Split the stream into buffered read/write halves.
        let mut reader = BufReader::new(stream.try_clone()?);
        let mut writer = BufWriter::new(stream);

        // Initialize MCP connection (newline-delimited JSON-RPC).
        let init = serde_json::json!({"jsonrpc": "2.0", "id": 1, "method": "initialize",
            "params": {"protocolVersion": "2024-11-05", "capabilities": {},
            "clientInfo": {"name": "forward", "version": "0.1"}}});
        writeln!(writer, "{}", init)?;
        writer.flush()?;
        // Read and discard the initialize response line.
        let mut buf = String::new();
        reader.read_line(&mut buf)?;

        // next_id starts at 1: id 1 was consumed by the handshake above,
        // so call() pre-increments to 2 for the first tools/call.
        Ok(Self { reader, writer, next_id: 1 })
    }

    /// Invoke `tools/call` for `tool_name` over the socket and return the
    /// first text content block of the result (empty string if absent).
    ///
    /// Returns an error if the daemon reports a JSON-RPC error, or on any
    /// socket/parse failure (callers treat that as a broken connection).
    fn call(&mut self, tool_name: &str, args: &serde_json::Value) -> Result<String> {
        use std::io::{BufRead, Write};

        self.next_id += 1;
        let call = serde_json::json!({"jsonrpc": "2.0", "id": self.next_id, "method": "tools/call",
            "params": {"name": tool_name, "arguments": args}});
        writeln!(self.writer, "{}", call)?;
        self.writer.flush()?;

        // One response per request, newline-delimited.
        let mut buf = String::new();
        self.reader.read_line(&mut buf)?;

        let resp: serde_json::Value = serde_json::from_str(&buf)?;
        if let Some(err) = resp.get("error") {
            anyhow::bail!("daemon error: {}", err);
        }
        // Extract result.content[0].text, defaulting to "" at each step.
        let result = resp.get("result").cloned().unwrap_or(serde_json::json!({}));
        let text = result.get("content")
            .and_then(|c| c.as_array())
            .and_then(|arr| arr.first())
            .and_then(|c| c.get("text"))
            .and_then(|t| t.as_str())
            .unwrap_or("");
        Ok(text.to_string())
    }
}
|
|
|
|
|
|
|
|
|
|
/// Forward a tool call to the daemon socket, or execute locally if daemon is down.
/// Used by external processes that don't have direct store access.
///
/// Connection handling: a single cached connection lives in SOCKET_CONN.
/// A failed call on the cached connection clears it and a fresh connect
/// is attempted once; if that also fails, the call is executed against a
/// locally opened store instead.
pub fn memory_rpc(tool_name: &str, args: serde_json::Value) -> Result<String> {
    let conn_lock = SOCKET_CONN.get_or_init(|| Mutex::new(None));
    let mut guard = conn_lock.lock().unwrap();

    // Try cached connection first
    if let Some(conn) = guard.as_mut() {
        match conn.call(tool_name, &args) {
            Ok(result) => return Ok(result),
            Err(_) => {
                // Connection broken, clear cache and retry
                *guard = None;
            }
        }
    }

    // Try to establish new connection
    match SocketConn::connect() {
        Ok(mut conn) => {
            // Cache the connection even if this first call fails; a
            // subsequent call will detect the breakage and reconnect.
            let result = conn.call(tool_name, &args);
            *guard = Some(conn);
            result
        }
        Err(_) => {
            // Socket unavailable - fall back to local store
            drop(guard); // Release lock before blocking
            // NOTE(review): block_in_place requires the multi-threaded
            // tokio runtime, and Handle::current() panics outside a
            // runtime context — confirm every caller of this sync fn
            // runs inside tokio (dispatch() reaches here via
            // spawn_blocking, which does propagate the runtime context).
            tokio::task::block_in_place(|| {
                tokio::runtime::Handle::current()
                    .block_on(rpc_local(tool_name, &args))
            })
        }
    }
}
|
|
|
|
|
|
|
|
|
|
/// Execute a tool locally when daemon isn't running.
/// Thin wrapper over run_with_local_store; clones args because that
/// function takes ownership of the JSON value.
async fn rpc_local(tool_name: &str, args: &serde_json::Value) -> Result<String> {
    run_with_local_store(tool_name, args.clone()).await
}
|
|
|
|
|
|
tools/memory: one function per tool
Split the monolithic dispatch(name, args) into individual public
functions (render, write, search, links, link_set, link_add, used,
weight_set, rename, supersede, query, output, journal_tail,
journal_new, journal_update) each with a matching _def() function.
The old dispatch() remains as a thin match for backward compat
until the Tool registry replaces it.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-04 15:03:04 -04:00
|
|
|
// ── Helpers ────────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Extract the required string argument `name` from a JSON object.
///
/// Errors with "<name> is required" when the field is missing or not a
/// string. Uses `with_context` so the error message is only formatted on
/// the failure path (the plain `.context(format!(…))` form allocated the
/// string even on success).
fn get_str<'a>(args: &'a serde_json::Value, name: &'a str) -> Result<&'a str> {
    args.get(name)
        .and_then(|v| v.as_str())
        .with_context(|| format!("{} is required", name))
}
|
|
|
|
|
|
|
|
|
|
/// Extract the required numeric argument `name` from a JSON object as f64.
///
/// Errors with "<name> is required" when the field is missing or not a
/// number. Uses `with_context` so the error message is only formatted on
/// the failure path (the plain `.context(format!(…))` form allocated the
/// string even on success).
fn get_f64(args: &serde_json::Value, name: &str) -> Result<f64> {
    args.get(name)
        .and_then(|v| v.as_f64())
        .with_context(|| format!("{} is required", name))
}
|
|
|
|
|
|
2026-04-12 21:16:13 -04:00
|
|
|
async fn cached_store() -> Result<Arc<crate::Mutex<Store>>> {
|
|
|
|
|
// Check thread-local first (rpc_local fallback path)
|
|
|
|
|
if let Some(store) = LOCAL_STORE.with(|s| s.borrow().clone()) {
|
|
|
|
|
return Ok(store);
|
|
|
|
|
}
|
|
|
|
|
// Use global handle if set (daemon mode)
|
|
|
|
|
if let Some(store) = STORE_HANDLE.get() {
|
|
|
|
|
return Ok(store.clone());
|
|
|
|
|
}
|
|
|
|
|
// Fallback to loading (for backwards compat during transition)
|
2026-04-07 03:35:08 -04:00
|
|
|
Store::cached().await.map_err(|e| anyhow::anyhow!("{}", e))
|
tools/memory: one function per tool
Split the monolithic dispatch(name, args) into individual public
functions (render, write, search, links, link_set, link_add, used,
weight_set, rename, supersede, query, output, journal_tail,
journal_new, journal_update) each with a matching _def() function.
The old dispatch() remains as a thin match for backward compat
until the Tool registry replaces it.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-04 15:03:04 -04:00
|
|
|
}
|
|
|
|
|
|
2026-04-12 21:16:13 -04:00
|
|
|
/// Run a tool with a temporarily-opened store (for rpc_local fallback).
///
/// Publishes the store in the LOCAL_STORE thread-local so cached_store()
/// finds it, dispatches the tool, then clears the slot again.
pub async fn run_with_local_store(tool_name: &str, args: serde_json::Value) -> Result<String> {
    let store = Store::cached().await.map_err(|e| anyhow::anyhow!("{}", e))?;

    LOCAL_STORE.with(|s| *s.borrow_mut() = Some(store));
    // NOTE(review): dispatch().await may resume on a different worker
    // thread where this thread-local is unset; cached_store() would then
    // fall back to Store::cached() (which returns the same cached store).
    // Confirm that is the intended behavior. Also note the slot is not
    // cleared if dispatch panics.
    let result = dispatch(tool_name, &None, args).await;
    LOCAL_STORE.with(|s| *s.borrow_mut() = None);

    result
}
|
|
|
|
|
|
2026-04-13 12:08:46 -04:00
|
|
|
/// Get provenance from args._provenance, or "manual".
|
|
|
|
|
fn get_provenance(args: &serde_json::Value) -> String {
|
|
|
|
|
args.get("_provenance")
|
|
|
|
|
.and_then(|v| v.as_str())
|
|
|
|
|
.unwrap_or("manual")
|
|
|
|
|
.to_string()
|
2026-04-07 17:46:40 -04:00
|
|
|
}
|
tools/memory: one function per tool
Split the monolithic dispatch(name, args) into individual public
functions (render, write, search, links, link_set, link_add, used,
weight_set, rename, supersede, query, output, journal_tail,
journal_new, journal_update) each with a matching _def() function.
The old dispatch() remains as a thin match for backward compat
until the Tool registry replaces it.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-04 15:03:04 -04:00
|
|
|
|
memory tools: typed hippocampus fns + macro dispatch
Move tool implementations from tools/memory.rs to hippocampus/mod.rs
with proper typed signatures:
fn name(store, provenance, ...typed args...) -> Result<String>
Optional params take Option<T>, defaults applied in implementation.
tools/memory.rs is now a thin dispatch layer using memory_tool! macro:
memory_tool!(write, mut, key: [str], content: [str]);
memory_tool!(search, ref, keys: [Vec<String>], max_hops: [Option<u32>], ...);
~634 lines of boilerplate replaced with ~30 one-liner invocations.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-13 13:03:24 -04:00
|
|
|
// ── Macro for generating tool wrappers ─────────────────────────
//
// memory_tool!(name, mut, arg1: [str], arg2: [Option<bool>])
// - mut/ref for store mutability
// - generates jsonargs_* (internal, JSON args) and public typed API
macro_rules! memory_tool {
    // ── Helper rules (must come first) ─────────────────────────────

    // Extract from JSON — one rule per supported parameter type.
    // Required types (str, f32, Vec<String>) error/default on absence;
    // Option<…> types simply yield None.
    (@extract $args:ident, $name:ident, str) => {
        get_str($args, stringify!($name))?
    };
    (@extract $args:ident, $name:ident, f32) => {
        get_f64($args, stringify!($name))? as f32
    };
    (@extract $args:ident, $name:ident, Vec<String>) => {
        // Missing or non-array values become an empty Vec;
        // non-string elements are silently skipped.
        $args.get(stringify!($name))
            .and_then(|v| v.as_array())
            .map(|arr| arr.iter().filter_map(|v| v.as_str().map(String::from)).collect::<Vec<_>>())
            .unwrap_or_default()
    };
    (@extract $args:ident, $name:ident, Option<&str>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_str())
    };
    (@extract $args:ident, $name:ident, Option<bool>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_bool())
    };
    (@extract $args:ident, $name:ident, Option<u64>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_u64())
    };
    (@extract $args:ident, $name:ident, Option<i64>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_i64())
    };
    (@extract $args:ident, $name:ident, Option<usize>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_u64()).map(|v| v as usize)
    };
    (@extract $args:ident, $name:ident, Option<u32>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_u64()).map(|v| v as u32)
    };
    (@extract $args:ident, $name:ident, Option<f64>) => {
        $args.get(stringify!($name)).and_then(|v| v.as_f64())
    };

    // Parameter types for function signatures
    (@param_type str) => { &str };
    (@param_type f32) => { f32 };
    (@param_type Vec<String>) => { Vec<String> };
    (@param_type Option<&str>) => { Option<&str> };
    (@param_type Option<bool>) => { Option<bool> };
    (@param_type Option<u64>) => { Option<u64> };
    (@param_type Option<i64>) => { Option<i64> };
    (@param_type Option<usize>) => { Option<usize> };
    (@param_type Option<u32>) => { Option<u32> };
    (@param_type Option<f64>) => { Option<f64> };

    // Serialize to JSON for RPC.
    // Required types are always inserted; Option<…> args are omitted
    // entirely when None so the daemon applies its own defaults.
    (@insert_json $map:ident, $name:ident, str) => {
        $map.insert(stringify!($name).into(), serde_json::json!($name));
    };
    (@insert_json $map:ident, $name:ident, f32) => {
        $map.insert(stringify!($name).into(), serde_json::json!($name));
    };
    (@insert_json $map:ident, $name:ident, Vec<String>) => {
        $map.insert(stringify!($name).into(), serde_json::json!($name));
    };
    (@insert_json $map:ident, $name:ident, Option<&str>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };
    (@insert_json $map:ident, $name:ident, Option<bool>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };
    (@insert_json $map:ident, $name:ident, Option<u64>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };
    (@insert_json $map:ident, $name:ident, Option<i64>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };
    (@insert_json $map:ident, $name:ident, Option<usize>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };
    (@insert_json $map:ident, $name:ident, Option<u32>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };
    (@insert_json $map:ident, $name:ident, Option<f64>) => {
        if let Some(v) = $name { $map.insert(stringify!($name).into(), serde_json::json!(v)); }
    };

    // ── Main rules ─────────────────────────────────────────────────

    // Mutable store variant
    ($name:ident, mut $(, $($arg:ident : [$($typ:tt)+]),* $(,)?)?) => {
        paste::paste! {
            // Internal wrapper: JSON args -> typed call on the locked store.
            // Used by dispatch() on the daemon path.
            async fn [<jsonargs_ $name>](args: &serde_json::Value) -> Result<String> {
                $($(let $arg = memory_tool!(@extract args, $arg, $($typ)+);)*)?
                let prov = get_provenance(args);
                let arc = cached_store().await?;
                let mut store = arc.lock().await;
                crate::hippocampus::$name(&mut store, &prov $($(, $arg)*)?)
            }

            // Public typed API. Outside the daemon the args are
            // serialized and forwarded over the socket; inside, the
            // typed hippocampus implementation is called directly.
            pub async fn $name(agent: Option<&crate::agent::Agent> $($(, $arg: memory_tool!(@param_type $($typ)+))*)?) -> Result<String> {
                if !is_daemon() {
                    #[allow(unused_mut)]
                    let mut map = serde_json::Map::new();
                    $($(memory_tool!(@insert_json map, $arg, $($typ)+);)*)?
                    return memory_rpc(stringify!($name), serde_json::Value::Object(map));
                }
                let prov = match agent {
                    Some(a) => a.state.lock().await.provenance.clone(),
                    None => "manual".to_string(),
                };
                let arc = cached_store().await?;
                let mut store = arc.lock().await;
                crate::hippocampus::$name(&mut store, &prov $($(, $arg)*)?)
            }
        }
    };

    // Immutable store variant — identical to `mut` except the store is
    // borrowed immutably by the hippocampus call.
    ($name:ident, ref $(, $($arg:ident : [$($typ:tt)+]),* $(,)?)?) => {
        paste::paste! {
            async fn [<jsonargs_ $name>](args: &serde_json::Value) -> Result<String> {
                $($(let $arg = memory_tool!(@extract args, $arg, $($typ)+);)*)?
                let prov = get_provenance(args);
                let arc = cached_store().await?;
                let store = arc.lock().await;
                crate::hippocampus::$name(&store, &prov $($(, $arg)*)?)
            }

            pub async fn $name(agent: Option<&crate::agent::Agent> $($(, $arg: memory_tool!(@param_type $($typ)+))*)?) -> Result<String> {
                if !is_daemon() {
                    #[allow(unused_mut)]
                    let mut map = serde_json::Map::new();
                    $($(memory_tool!(@insert_json map, $arg, $($typ)+);)*)?
                    return memory_rpc(stringify!($name), serde_json::Value::Object(map));
                }
                let prov = match agent {
                    Some(a) => a.state.lock().await.provenance.clone(),
                    None => "manual".to_string(),
                };
                let arc = cached_store().await?;
                let store = arc.lock().await;
                crate::hippocampus::$name(&store, &prov $($(, $arg)*)?)
            }
        }
    };
}
|
|
|
|
|
|
|
|
|
|
// ── Memory tools ───────────────────────────────────────────────
// Each invocation generates a private `jsonargs_<name>` wrapper (JSON
// args, used by dispatch) plus a public typed async fn of the same name.

memory_tool!(memory_render, ref, key: [str], raw: [Option<bool>]);
memory_tool!(memory_write, mut, key: [str], content: [str]);
memory_tool!(memory_search, ref, keys: [Vec<String>], max_hops: [Option<u32>], edge_decay: [Option<f64>], min_activation: [Option<f64>], limit: [Option<usize>]);
memory_tool!(memory_links, ref, key: [str]);
memory_tool!(memory_link_set, mut, source: [str], target: [str], strength: [f32]);
memory_tool!(memory_link_add, mut, source: [str], target: [str]);
memory_tool!(memory_delete, mut, key: [str]);
memory_tool!(memory_history, ref, key: [str], full: [Option<bool>]);
memory_tool!(memory_weight_set, mut, key: [str], weight: [f32]);
memory_tool!(memory_rename, mut, old_key: [str], new_key: [str]);
memory_tool!(memory_supersede, mut, old_key: [str], new_key: [str], reason: [Option<&str>]);
memory_tool!(memory_query, ref, query: [str], format: [Option<&str>]);

// ── Journal tools ──────────────────────────────────────────────

memory_tool!(journal_tail, ref, count: [Option<u64>], level: [Option<u64>], format: [Option<&str>], after: [Option<&str>]);
memory_tool!(journal_new, mut, name: [str], title: [str], body: [str], level: [Option<i64>]);
memory_tool!(journal_update, mut, body: [str], level: [Option<i64>]);

// ── Graph tools ───────────────────────────────────────────────

memory_tool!(graph_topology, ref);
memory_tool!(graph_health, ref);
memory_tool!(graph_communities, ref, top_n: [Option<usize>], min_size: [Option<usize>]);
memory_tool!(graph_normalize_strengths, mut, apply: [Option<bool>]);
memory_tool!(graph_link_impact, ref, source: [str], target: [str]);
memory_tool!(graph_hubs, ref, count: [Option<usize>]);
memory_tool!(graph_trace, ref, key: [str]);
|
|
|
|
|
|
2026-04-12 21:16:13 -04:00
|
|
|
/// Single entry point for all memory/journal tool calls.
|
|
|
|
|
/// If not daemon, forwards to daemon with provenance attached.
|
|
|
|
|
async fn dispatch(
|
|
|
|
|
tool_name: &str,
|
|
|
|
|
agent: &Option<std::sync::Arc<crate::agent::Agent>>,
|
|
|
|
|
args: serde_json::Value,
|
|
|
|
|
) -> Result<String> {
|
2026-04-13 12:08:46 -04:00
|
|
|
let mut args = args;
|
|
|
|
|
if let Some(a) = agent {
|
|
|
|
|
let prov = a.state.lock().await.provenance.clone();
|
|
|
|
|
args.as_object_mut().map(|o| o.insert("_provenance".into(), prov.into()));
|
|
|
|
|
}
|
|
|
|
|
|
2026-04-12 21:16:13 -04:00
|
|
|
if !is_daemon() {
|
2026-04-13 12:08:46 -04:00
|
|
|
// Forward to daemon
|
2026-04-12 21:16:13 -04:00
|
|
|
let name = tool_name.to_string();
|
|
|
|
|
return tokio::task::spawn_blocking(move || {
|
2026-04-13 13:39:59 -04:00
|
|
|
memory_rpc(&name, args)
|
2026-04-12 21:16:13 -04:00
|
|
|
}).await.map_err(|e| anyhow::anyhow!("spawn_blocking: {}", e))?;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Daemon path - dispatch to implementation
|
|
|
|
|
match tool_name {
|
2026-04-13 13:26:22 -04:00
|
|
|
"memory_render" => jsonargs_memory_render(&args).await,
|
|
|
|
|
"memory_write" => jsonargs_memory_write(&args).await,
|
|
|
|
|
"memory_search" => jsonargs_memory_search(&args).await,
|
|
|
|
|
"memory_links" => jsonargs_memory_links(&args).await,
|
|
|
|
|
"memory_link_set" => jsonargs_memory_link_set(&args).await,
|
|
|
|
|
"memory_link_add" => jsonargs_memory_link_add(&args).await,
|
|
|
|
|
"memory_delete" => jsonargs_memory_delete(&args).await,
|
|
|
|
|
"memory_history" => jsonargs_memory_history(&args).await,
|
|
|
|
|
"memory_weight_set" => jsonargs_memory_weight_set(&args).await,
|
|
|
|
|
"memory_rename" => jsonargs_memory_rename(&args).await,
|
|
|
|
|
"memory_supersede" => jsonargs_memory_supersede(&args).await,
|
|
|
|
|
"memory_query" => jsonargs_memory_query(&args).await,
|
2026-04-13 13:12:11 -04:00
|
|
|
"graph_topology" => jsonargs_graph_topology(&args).await,
|
|
|
|
|
"graph_health" => jsonargs_graph_health(&args).await,
|
|
|
|
|
"graph_communities" => jsonargs_graph_communities(&args).await,
|
|
|
|
|
"graph_normalize_strengths" => jsonargs_graph_normalize_strengths(&args).await,
|
|
|
|
|
"graph_trace" => jsonargs_graph_trace(&args).await,
|
|
|
|
|
"graph_link_impact" => jsonargs_graph_link_impact(&args).await,
|
|
|
|
|
"graph_hubs" => jsonargs_graph_hubs(&args).await,
|
|
|
|
|
"journal_tail" => jsonargs_journal_tail(&args).await,
|
|
|
|
|
"journal_new" => jsonargs_journal_new(&args).await,
|
|
|
|
|
"journal_update" => jsonargs_journal_update(&args).await,
|
2026-04-12 21:16:13 -04:00
|
|
|
_ => anyhow::bail!("unknown tool: {}", tool_name),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
tools/memory: one function per tool
Split the monolithic dispatch(name, args) into individual public
functions (render, write, search, links, link_set, link_add, used,
weight_set, rename, supersede, query, output, journal_tail,
journal_new, journal_update) each with a matching _def() function.
The old dispatch() remains as a thin match for backward compat
until the Tool registry replaces it.
Co-Authored-By: Proof of Concept <poc@bcachefs.org>
2026-04-04 15:03:04 -04:00
|
|
|
// ── Definitions ────────────────────────────────────────────────

/// Tool definitions (name, description, JSON-schema params, handler) for
/// the memory and graph tools exposed to the LLM. All handlers route
/// through dispatch().
// NOTE(review): graph_communities / graph_normalize_strengths /
// graph_link_impact / graph_trace are dispatchable but not listed here —
// confirm whether they are intentionally unexposed.
pub fn memory_tools() -> [super::Tool; 15] {
    use super::Tool;
    [
        Tool { name: "memory_render", description: "Read a memory node's content and links.",
            parameters_json: r#"{"type":"object","properties":{"key":{"type":"string","description":"Node key"}},"required":["key"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_render", &a, v).await })) },
        Tool { name: "memory_write", description: "Create or update a memory node.",
            parameters_json: r#"{"type":"object","properties":{"key":{"type":"string","description":"Node key"},"content":{"type":"string","description":"Full content (markdown)"}},"required":["key","content"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_write", &a, v).await })) },
        Tool { name: "memory_search", description: "Search the memory graph via spreading activation. Give 2-4 seed node keys.",
            parameters_json: r#"{"type":"object","properties":{"keys":{"type":"array","items":{"type":"string"},"description":"Seed node keys to activate from"},"max_hops":{"type":"integer","description":"Max graph hops (default 3)"},"edge_decay":{"type":"number","description":"Decay per hop (default 0.3)"},"min_activation":{"type":"number","description":"Cutoff threshold (default 0.01)"},"limit":{"type":"integer","description":"Max results (default 20)"}},"required":["keys"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_search", &a, v).await })) },
        Tool { name: "memory_links", description: "Show a node's neighbors with link strengths.",
            parameters_json: r#"{"type":"object","properties":{"key":{"type":"string","description":"Node key"}},"required":["key"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_links", &a, v).await })) },
        Tool { name: "memory_link_set", description: "Set link strength between two nodes.",
            parameters_json: r#"{"type":"object","properties":{"source":{"type":"string"},"target":{"type":"string"},"strength":{"type":"number","description":"0.01 to 1.0"}},"required":["source","target","strength"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_link_set", &a, v).await })) },
        Tool { name: "memory_link_add", description: "Add a new link between two nodes.",
            parameters_json: r#"{"type":"object","properties":{"source":{"type":"string"},"target":{"type":"string"}},"required":["source","target"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_link_add", &a, v).await })) },
        Tool { name: "memory_delete", description: "Delete a memory node.",
            parameters_json: r#"{"type":"object","properties":{"key":{"type":"string","description":"Node key"}},"required":["key"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_delete", &a, v).await })) },
        Tool { name: "memory_history", description: "Show version history for a node.",
            parameters_json: r#"{"type":"object","properties":{"key":{"type":"string","description":"Node key"},"full":{"type":"boolean","description":"Show full content for each version"}},"required":["key"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_history", &a, v).await })) },
        Tool { name: "memory_weight_set", description: "Set a node's weight directly (0.01 to 1.0).",
            parameters_json: r#"{"type":"object","properties":{"key":{"type":"string"},"weight":{"type":"number","description":"0.01 to 1.0"}},"required":["key","weight"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_weight_set", &a, v).await })) },
        Tool { name: "memory_rename", description: "Rename a node key in place.",
            parameters_json: r#"{"type":"object","properties":{"old_key":{"type":"string"},"new_key":{"type":"string"}},"required":["old_key","new_key"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_rename", &a, v).await })) },
        Tool { name: "memory_supersede", description: "Mark a node as superseded by another (sets weight to 0.01).",
            parameters_json: r#"{"type":"object","properties":{"old_key":{"type":"string"},"new_key":{"type":"string"},"reason":{"type":"string"}},"required":["old_key","new_key"]}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_supersede", &a, v).await })) },
        Tool { name: "memory_query",
            description: "Run a structured query against the memory graph.",
            parameters_json: r#"{
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Query expression"},
                    "format": {"type": "string", "description": "compact (default) or full (with content and graph metrics)", "default": "compact"}
                },
                "required": ["query"]
            }"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("memory_query", &a, v).await })) },
        Tool { name: "graph_topology", description: "Show graph topology stats (nodes, edges, clustering, hubs).",
            parameters_json: r#"{"type":"object","properties":{}}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("graph_topology", &a, v).await })) },
        Tool { name: "graph_health", description: "Show graph health report with maintenance recommendations.",
            parameters_json: r#"{"type":"object","properties":{}}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("graph_health", &a, v).await })) },
        Tool { name: "graph_hubs", description: "Show top hub nodes by degree, spread apart for diverse link targets.",
            parameters_json: r#"{"type":"object","properties":{"count":{"type":"integer","description":"Number of hubs to return (default 20)"}}}"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("graph_hubs", &a, v).await })) },
    ]
}
|
|
|
|
|
|
2026-04-04 15:50:14 -04:00
|
|
|
/// Tool definitions for the journal tools (tail/new/update). All
/// handlers route through dispatch().
pub fn journal_tools() -> [super::Tool; 3] {
    use super::Tool;
    [
        Tool { name: "journal_tail",
            description: "Read the last N entries at a given level.",
            parameters_json: r#"{
                "type": "object",
                "properties": {
                    "count": {"type": "integer", "description": "Number of entries", "default": 1},
                    "level": {"type": "integer", "description": "0=journal, 1=daily, 2=weekly, 3=monthly", "default": 0},
                    "format": {"type": "string", "description": "compact or full (with content)", "default": "full"},
                    "after": {"type": "string", "description": "Only entries after this date (YYYY-MM-DD)"}
                }
            }"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("journal_tail", &a, v).await })) },
        Tool { name: "journal_new", description: "Start a new journal/digest entry.",
            parameters_json: r#"{
                "type": "object",
                "properties": {
                    "name": {"type": "string", "description": "Short node name (becomes the key)"},
                    "title": {"type": "string", "description": "Descriptive title"},
                    "body": {"type": "string", "description": "Entry body"},
                    "level": {"type": "integer", "description": "0=journal, 1=daily, 2=weekly, 3=monthly", "default": 0}
                },
                "required": ["name", "title", "body"]
            }"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("journal_new", &a, v).await })) },
        Tool { name: "journal_update", description: "Append text to the most recent entry at a level.",
            parameters_json: r#"{
                "type": "object",
                "properties": {
                    "body": {"type": "string", "description": "Text to append"},
                    "level": {"type": "integer", "description": "0=journal, 1=daily, 2=weekly, 3=monthly", "default": 0}
                },
                "required": ["body"]
            }"#,
            handler: Arc::new(|a, v| Box::pin(async move { dispatch("journal_update", &a, v).await })) },
    ]
}
|