From c64295ddb23126dd91096363130befee273a4d99 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 7 Apr 2026 13:43:25 -0400 Subject: [PATCH] Reduce pub visibility in agent::api and user modules api/: parsing module private, SamplingParams/StreamEvent/StreamResult/ AbortOnDrop/build_response_message/collect_stream to pub(crate). Internal types (ChatRequest, ChunkChoice, Delta, etc.) to pub(crate). StreamResult fields to pub(crate). Parsing functions to pub(super). user/: context, subconscious, unconscious, thalamus modules private (only chat needs pub(crate) for mind/ access). Co-Authored-By: Proof of Concept --- src/agent/api/mod.rs | 28 ++++++++++++++-------------- src/agent/api/parsing.rs | 6 +++--- src/agent/api/types.rs | 14 +++++++------- src/user/mod.rs | 10 +++++----- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/agent/api/mod.rs b/src/agent/api/mod.rs index fd3fd38..ae75a4b 100644 --- a/src/agent/api/mod.rs +++ b/src/agent/api/mod.rs @@ -7,7 +7,7 @@ // Set POC_DEBUG=1 for verbose per-turn logging. pub mod http; -pub mod parsing; +mod parsing; mod types; mod openai; @@ -26,7 +26,7 @@ use tokio::sync::mpsc; use crate::agent::tools::{self as agent_tools, summarize_args, ActiveToolCall}; /// A JoinHandle that aborts its task when dropped. -pub struct AbortOnDrop(tokio::task::JoinHandle<()>); +pub(crate) struct AbortOnDrop(tokio::task::JoinHandle<()>); impl Drop for AbortOnDrop { fn drop(&mut self) { @@ -36,7 +36,7 @@ impl Drop for AbortOnDrop { /// Sampling parameters for model generation. #[derive(Clone, Copy)] -pub struct SamplingParams { +pub(crate) struct SamplingParams { pub temperature: f32, pub top_p: f32, pub top_k: u32, @@ -54,7 +54,7 @@ fn tools_to_json_str(tools: &[agent_tools::Tool]) -> String { /// Events produced by the streaming API backends. /// The runner reads these and decides what to display where. -pub enum StreamEvent { +pub(crate) enum StreamEvent { /// Content token from the model's response. 
Content(String), /// Reasoning/thinking token (internal monologue). @@ -447,7 +447,7 @@ impl SseReader { /// from models that emit tool calls as text), parse them out and /// promote them to structured tool_calls. This way all consumers /// see tool calls uniformly regardless of backend. -pub fn build_response_message( +pub(crate) fn build_response_message( content: String, tool_calls: Vec<ToolCall>, ) -> Message { @@ -574,16 +574,16 @@ pub(crate) fn log_diagnostics( // --------------------------------------------------------------------------- /// Result of collecting a complete response from the stream. -pub struct StreamResult { - pub content: String, - pub tool_calls: Vec<ToolCall>, - pub usage: Option<Usage>, - pub finish_reason: Option<String>, - pub error: Option<String>, +pub(crate) struct StreamResult { + pub(crate) content: String, + pub(crate) tool_calls: Vec<ToolCall>, + pub(crate) usage: Option<Usage>, + pub(crate) finish_reason: Option<String>, + pub(crate) error: Option<String>, /// Remaining display buffer (caller should flush if not in a tool call). - pub display_buf: String, + pub(crate) display_buf: String, /// Whether we were mid-tool-call when the stream ended. - pub in_tool_call: bool, + pub(crate) in_tool_call: bool, } /// Collect stream events into a complete response. Handles: @@ -591,7 +591,7 @@ pub struct StreamResult { /// - Leaked tool call detection and dispatch (Qwen XML in content) /// - Structured tool call delta assembly (OpenAI-style) /// - UI forwarding (text deltas, reasoning, tool call notifications) -pub async fn collect_stream( +pub(crate) async fn collect_stream( rx: &mut mpsc::UnboundedReceiver<StreamEvent>, agent: &std::sync::Arc>, active_tools: &crate::agent::tools::SharedActiveTools, diff --git a/src/agent/api/parsing.rs b/src/agent/api/parsing.rs index 2658aa5..94b03d0 100644 --- a/src/agent/api/parsing.rs +++ b/src/agent/api/parsing.rs @@ -17,7 +17,7 @@ use super::types::{ToolCall, FunctionCall}; /// Looks for `<tool_call>...</tool_call>` blocks and tries both /// XML and JSON formats for the body.
/// Parse a single tool call body (content between `<tool_call>` and `</tool_call>`). -pub fn parse_tool_call_body(body: &str) -> Option<ToolCall> { +pub(super) fn parse_tool_call_body(body: &str) -> Option<ToolCall> { let normalized = normalize_xml_tags(body); let body = normalized.trim(); let mut counter = 0u32; @@ -25,7 +25,7 @@ pub fn parse_tool_call_body(body: &str) -> Option<ToolCall> { .or_else(|| parse_json_tool_call(body, &mut counter)) } -pub fn parse_leaked_tool_calls(text: &str) -> Vec<ToolCall> { +pub(super) fn parse_leaked_tool_calls(text: &str) -> Vec<ToolCall> { // Normalize whitespace inside XML tags: "<\nfunction\n=\nbash\n>" → "<function=bash>" // This handles streaming tokenizers that split tags across tokens. let normalized = normalize_xml_tags(text); @@ -147,7 +147,7 @@ fn parse_json_tool_call(body: &str, counter: &mut u32) -> Option<ToolCall> { /// Strip tool call XML and thinking tokens from text so the conversation /// history stays clean. Removes `<tool_call>...</tool_call>` blocks and /// `</think>` tags (thinking content before them is kept — it's useful context). -pub fn strip_leaked_artifacts(text: &str) -> String { +pub(super) fn strip_leaked_artifacts(text: &str) -> String { let normalized = normalize_xml_tags(text); let mut result = normalized.clone(); diff --git a/src/agent/api/types.rs b/src/agent/api/types.rs index ce79cdf..d5dcee6 100644 --- a/src/agent/api/types.rs +++ b/src/agent/api/types.rs @@ -18,7 +18,7 @@ pub struct FunctionCall { /// Partial function call within a streaming delta. #[derive(Debug, Deserialize)] -pub struct FunctionCallDelta { +pub(crate) struct FunctionCallDelta { pub name: Option<String>, pub arguments: Option<String>, } @@ -34,7 +34,7 @@ pub struct ToolCall { /// A partial tool call within a streaming delta. #[derive(Debug, Deserialize)] -pub struct ToolCallDelta { +pub(crate) struct ToolCallDelta { pub index: usize, pub id: Option<String>, #[serde(rename = "type")] @@ -114,7 +114,7 @@ pub enum Role { /// Chat completion request.
#[derive(Debug, Serialize)] -pub struct ChatRequest { +pub(crate) struct ChatRequest { pub model: String, pub messages: Vec<Message>, #[serde(skip_serializing_if = "Option::is_none")] @@ -146,7 +146,7 @@ pub struct ChatRequest { } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ReasoningConfig { +pub(crate) struct ReasoningConfig { pub enabled: bool, /// "none" disables reasoning entirely per OpenRouter docs. #[serde(skip_serializing_if = "Option::is_none")] @@ -165,14 +165,14 @@ pub struct Usage { /// A single chunk from a streaming chat completion response (SSE). #[derive(Debug, Deserialize)] -pub struct ChatCompletionChunk { +pub(crate) struct ChatCompletionChunk { pub choices: Vec<ChunkChoice>, pub usage: Option<Usage>, } #[derive(Debug, Deserialize)] #[allow(dead_code)] -pub struct ChunkChoice { +pub(crate) struct ChunkChoice { pub delta: Delta, pub finish_reason: Option<String>, } @@ -181,7 +181,7 @@ pub struct ChunkChoice { /// chunk only carries the incremental change. #[derive(Debug, Deserialize, Default)] #[allow(dead_code)] -pub struct Delta { +pub(crate) struct Delta { pub role: Option<Role>, pub content: Option<String>, /// Reasoning/thinking content — sent by some models (Qwen, DeepSeek) diff --git a/src/user/mod.rs b/src/user/mod.rs index 687245d..fb7ea31 100644 --- a/src/user/mod.rs +++ b/src/user/mod.rs @@ -3,11 +3,11 @@ // TUI, UI channel, parsing. The cognitive layer (session state // machine, DMN, identity) lives in mind/. -pub mod chat; -pub mod context; -pub mod subconscious; -pub mod unconscious; -pub mod thalamus; +pub(crate) mod chat; +mod context; +mod subconscious; +mod unconscious; +mod thalamus; use anyhow::Result; use std::io::Write;