diff --git a/src/agent/mod.rs b/src/agent/mod.rs
index e769b45..fe96e6c 100644
--- a/src/agent/mod.rs
+++ b/src/agent/mod.rs
@@ -25,7 +25,7 @@ use tiktoken_rs::CoreBPE;
 
 use api::{ApiClient, StreamEvent};
 use context as journal;
-use tools::{ToolCall, ToolDef, FunctionCall, summarize_args};
+use tools::{ToolCall, FunctionCall, summarize_args};
 use crate::user::log::ConversationLog;
 use crate::agent::api::types::*;
 
@@ -72,7 +72,7 @@ impl DispatchState {
 
 pub struct Agent {
     client: ApiClient,
-    tool_defs: Vec<ToolDef>,
+    tools: Vec<tools::Tool>,
     /// Last known prompt token count from the API (tracks context size).
     last_prompt_tokens: u32,
     /// Current reasoning effort level ("none", "low", "high").
@@ -127,7 +127,6 @@ impl Agent {
         shared_context: SharedContextState,
         active_tools: crate::user::ui_channel::SharedActiveTools,
     ) -> Self {
-        let tool_defs = tools::definitions();
         let tokenizer = tiktoken_rs::cl100k_base()
             .expect("failed to load cl100k_base tokenizer");
 
@@ -142,7 +141,7 @@ impl Agent {
         let agent_cycles = crate::subconscious::subconscious::AgentCycleState::new(&session_id);
         let mut agent = Self {
             client,
-            tool_defs,
+            tools: tools::tools(),
             last_prompt_tokens: 0,
             reasoning_effort: "none".to_string(),
             temperature: 0.6,
@@ -307,9 +306,10 @@ impl Agent {
             top_p: me.top_p,
             top_k: me.top_k,
         };
+        let tool_defs: Vec<_> = me.tools.iter().map(|t| t.to_tool_def()).collect();
         me.client.start_stream(
             &api_messages,
-            Some(&me.tool_defs),
+            Some(&tool_defs),
             ui_tx,
             &me.reasoning_effort,
             sampling,