add call_simple for non-agent LLM calls
audit, digest, and compare now go through the API backend via
call_simple(), which logs to llm-logs/{caller}/.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
e3f7d6bd3c
commit
04dffa2184
4 changed files with 27 additions and 13 deletions
|
|
@ -3,7 +3,7 @@
|
||||||
// Each batch of links gets reviewed by Sonnet, which returns per-link actions:
|
// Each batch of links gets reviewed by Sonnet, which returns per-link actions:
|
||||||
// KEEP, DELETE, RETARGET, WEAKEN, STRENGTHEN. Batches run in parallel via rayon.
|
// KEEP, DELETE, RETARGET, WEAKEN, STRENGTHEN. Batches run in parallel via rayon.
|
||||||
|
|
||||||
use super::llm::call_sonnet;
|
use super::llm;
|
||||||
use crate::store::{self, Store, new_relation};
|
use crate::store::{self, Store, new_relation};
|
||||||
|
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
|
@ -211,7 +211,7 @@ pub fn link_audit(store: &mut Store, apply: bool) -> Result<AuditStats, String>
|
||||||
// Run batches in parallel via rayon
|
// Run batches in parallel via rayon
|
||||||
let batch_results: Vec<_> = batch_data.par_iter()
|
let batch_results: Vec<_> = batch_data.par_iter()
|
||||||
.map(|(batch_idx, batch_infos, prompt)| {
|
.map(|(batch_idx, batch_infos, prompt)| {
|
||||||
let response = call_sonnet("audit", prompt);
|
let response = llm::call_simple("audit", prompt);
|
||||||
let completed = done.fetch_add(1, Ordering::Relaxed) + 1;
|
let completed = done.fetch_add(1, Ordering::Relaxed) + 1;
|
||||||
eprint!("\r Batches: {}/{} done", completed, total_batches);
|
eprint!("\r Batches: {}/{} done", completed, total_batches);
|
||||||
(*batch_idx, batch_infos, response)
|
(*batch_idx, batch_infos, response)
|
||||||
|
|
|
||||||
|
|
@ -5,7 +5,7 @@
|
||||||
// summarize weeklies. All three share the same generate/auto-detect
|
// summarize weeklies. All three share the same generate/auto-detect
|
||||||
// pipeline, parameterized by DigestLevel.
|
// pipeline, parameterized by DigestLevel.
|
||||||
|
|
||||||
use super::llm::{call_sonnet, semantic_keys};
|
use super::llm;
|
||||||
use crate::store::{self, Store, new_relation};
|
use crate::store::{self, Store, new_relation};
|
||||||
use crate::neuro;
|
use crate::neuro;
|
||||||
|
|
||||||
|
|
@ -211,7 +211,7 @@ fn generate_digest(
|
||||||
}
|
}
|
||||||
println!(" {} inputs", inputs.len());
|
println!(" {} inputs", inputs.len());
|
||||||
|
|
||||||
let keys = semantic_keys(store);
|
let keys = llm::semantic_keys(store);
|
||||||
let keys_text = keys.iter()
|
let keys_text = keys.iter()
|
||||||
.map(|k| format!(" - {}", k))
|
.map(|k| format!(" - {}", k))
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
|
|
@ -244,7 +244,7 @@ fn generate_digest(
|
||||||
println!(" Prompt: {} chars (~{} tokens)", prompt.len(), prompt.len() / 4);
|
println!(" Prompt: {} chars (~{} tokens)", prompt.len(), prompt.len() / 4);
|
||||||
|
|
||||||
println!(" Calling Sonnet...");
|
println!(" Calling Sonnet...");
|
||||||
let digest = call_sonnet("digest", &prompt)?;
|
let digest = llm::call_simple("digest", &prompt)?;
|
||||||
|
|
||||||
let key = digest_node_key(level.name, label);
|
let key = digest_node_key(level.name, label);
|
||||||
store.upsert_provenance(&key, &digest, "digest:write")?;
|
store.upsert_provenance(&key, &digest, "digest:write")?;
|
||||||
|
|
|
||||||
|
|
@ -183,9 +183,26 @@ pub(crate) fn call_haiku(agent: &str, prompt: &str) -> Result<String, String> {
|
||||||
call_model(agent, "haiku", prompt)
|
call_model(agent, "haiku", prompt)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Simple LLM call for non-agent uses (audit, digest, compare).
/// Logs to llm-logs/{caller}/ file.
///
/// `caller` names the subsystem making the call; it selects both the
/// log subdirectory and the label passed to the API backend.
/// Returns the model's response text, or an error string from the backend.
pub(crate) fn call_simple(caller: &str, prompt: &str) -> Result<String, String> {
    // One log file per call, under llm-logs/{caller}/, named by timestamp.
    let log_dir = crate::store::memory_dir().join("llm-logs").join(caller);
    // Best-effort: a failure to create the log directory must not fail the call.
    fs::create_dir_all(&log_dir).ok();
    let log_path = log_dir.join(format!("{}.txt", crate::store::compact_timestamp()));

    use std::io::Write;
    // Append-mode logging closure handed to the backend; open errors and
    // write errors are deliberately swallowed so logging stays best-effort.
    // NOTE(review): the file is reopened on every message — presumably log
    // volume is low enough that this is fine; confirm if it becomes hot.
    let log = move |msg: &str| {
        if let Ok(mut f) = fs::OpenOptions::new()
            .create(true).append(true).open(&log_path)
        {
            let _ = writeln!(f, "{}", msg);
        }
    };

    // Model selection/config is handled inside the API backend.
    super::api::call_api_with_tools_sync(caller, prompt, &log)
}
|
||||||
|
|
||||||
|
/// Call a model using an agent definition's configuration.
|
||||||
pub(crate) fn call_for_def(
|
pub(crate) fn call_for_def(
|
||||||
def: &super::defs::AgentDef,
|
def: &super::defs::AgentDef,
|
||||||
prompt: &str,
|
prompt: &str,
|
||||||
|
|
|
||||||
|
|
@ -384,11 +384,8 @@ fn llm_compare(
|
||||||
) -> Result<std::cmp::Ordering, String> {
|
) -> Result<std::cmp::Ordering, String> {
|
||||||
let prompt = build_compare_prompt(a, b);
|
let prompt = build_compare_prompt(a, b);
|
||||||
|
|
||||||
let response = if model == "haiku" {
|
let _ = model; // model selection handled by API backend config
|
||||||
llm::call_haiku("compare", &prompt)?
|
let response = llm::call_simple("compare", &prompt)?;
|
||||||
} else {
|
|
||||||
llm::call_sonnet("compare", &prompt)?
|
|
||||||
};
|
|
||||||
let response = response.trim().to_uppercase();
|
let response = response.trim().to_uppercase();
|
||||||
|
|
||||||
if response.contains("BETTER: B") {
|
if response.contains("BETTER: B") {
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue