Fix build warnings across workspace

- Remove redundant token fields from StreamEvent::Finished (data
  already delivered via Usage event)
- Remove dead hotkey_adjust_sampling, MAX_HISTORY, now()
- Fix unused variable warnings (delta, log)
- Suppress deserialization-only field warnings (jsonrpc, role)
- Make start_stream/chat_completion_stream_temp pub(crate)
- Remove unnecessary pub(crate) re-export of internal types

Remaining warnings are TODO items: SkipIndex (scoring not wired),
notify (MCP notifications not wired).

Co-Authored-By: Proof of Concept <poc@bcachefs.org>
This commit is contained in:
Kent Overstreet 2026-04-07 13:55:30 -04:00
parent c64295ddb2
commit 9737641c86
10 changed files with 11 additions and 42 deletions

View file

@@ -13,8 +13,6 @@ mod openai;
// Public API types — used outside agent::api
pub use types::{Message, MessageContent, ContentPart, ImageUrl, Role, ToolCall, FunctionCall, Usage};
// Internal types — re-exported for sibling modules within agent/
pub(crate) use types::{ChatRequest, ReasoningConfig, ChatCompletionChunk, ChunkChoice, Delta, ToolCallDelta, FunctionCallDelta};
use anyhow::Result;
use std::time::{Duration, Instant};
@@ -72,8 +70,6 @@ pub(crate) enum StreamEvent {
/// Stream finished.
Finished {
reason: String,
prompt_tokens: u32,
completion_tokens: u32,
},
/// Error from the stream.
Error(String),
@@ -105,7 +101,7 @@ impl ApiClient {
/// Start a streaming chat completion. Returns a receiver of StreamEvents.
/// The caller (runner) reads events and handles routing to the UI.
///
pub fn start_stream(
pub(crate) fn start_stream(
&self,
messages: &[Message],
tools: &[agent_tools::Tool],
@@ -137,7 +133,7 @@ impl ApiClient {
(rx, AbortOnDrop(handle))
}
pub async fn chat_completion_stream_temp(
pub(crate) async fn chat_completion_stream_temp(
&self,
messages: &[Message],
tools: &[agent_tools::Tool],

View file

@@ -181,14 +181,7 @@ pub(super) async fn stream_events(
);
let reason = finish_reason.unwrap_or_default();
let (pt, ct) = usage.as_ref()
.map(|u| (u.prompt_tokens, u.completion_tokens))
.unwrap_or((0, 0));
let _ = tx.send(StreamEvent::Finished {
reason,
prompt_tokens: pt,
completion_tokens: ct,
});
let _ = tx.send(StreamEvent::Finished { reason });
Ok(())
}

View file

@@ -154,7 +154,6 @@ pub(crate) struct ReasoningConfig {
}
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct Usage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
@@ -171,7 +170,6 @@ pub(crate) struct ChatCompletionChunk {
}
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
pub(crate) struct ChunkChoice {
pub delta: Delta,
pub finish_reason: Option<String>,
@@ -180,8 +178,8 @@ pub(crate) struct ChunkChoice {
/// The delta within a streaming chunk. All fields optional because each
/// chunk only carries the incremental change.
#[derive(Debug, Deserialize, Default)]
#[allow(dead_code)]
pub(crate) struct Delta {
#[allow(dead_code)] // present for deserialization
pub role: Option<Role>,
pub content: Option<String>,
/// Reasoning/thinking content — sent by some models (Qwen, DeepSeek)
@@ -276,7 +274,6 @@ impl Message {
}
}
#[allow(dead_code)]
pub fn assistant(content: impl Into<String>) -> Self {
Self {
role: Role::Assistant,

View file

@@ -14,7 +14,7 @@ use std::fs;
use std::path::PathBuf;
use std::sync::OnceLock;
use super::api::{self, ApiClient, Message, Usage};
use super::api::{ApiClient, Message, Usage};
use super::tools::{self as agent_tools};
use super::Agent;