scripts: use capnp store instead of reading markdown directly
Add store_helpers.py with shared helpers that call poc-memory commands (list-keys, render, journal-tail) instead of globbing ~/.claude/memory/*.md and parsing section headers. All 9 Python scripts updated: get_semantic_keys(), get_topic_file_index(), get_recent_journal(), parse_journal_entries(), read_journal_range(), collect_topic_stems(), and file preview rendering now go through the store. This completes the clean switch — no script reads archived markdown files.
This commit is contained in:
parent
f20ea4f827
commit
d14710e477
10 changed files with 324 additions and 297 deletions
|
|
@ -55,10 +55,28 @@ def call_sonnet(prompt: str, timeout: int = 600) -> str:
|
|||
|
||||
|
||||
def read_file(path: Path) -> str:
    """Read a file's text content, returning "" if nothing is found.

    Falls back to the capnp store when the file doesn't exist on disk
    (content markdown files have been archived). The filename doubles as
    the store key; per-section nodes live under "<filename>#<slug>" keys,
    and all matching nodes are concatenated in store order.
    """
    if path.exists():
        return path.read_text()
    # File is archived — try the store. Deferred import keeps the disk
    # fast-path free of any store dependency.
    from store_helpers import render, list_keys
    key = path.name
    # Gather the file-level node plus every section node for this file.
    all_keys = list_keys()
    prefix = f"{key}#"
    matching = [k for k in all_keys if k == key or k.startswith(prefix)]
    if not matching:
        return ""
    parts = []
    for k in matching:
        content = render(k)
        # Skip empty renders so we don't emit bare "\n\n" separators.
        if content:
            parts.append(content)
    return "\n\n".join(parts)
|
||||
|
||||
|
||||
def read_digest(name: str) -> str:
|
||||
|
|
@ -68,25 +86,9 @@ def read_digest(name: str) -> str:
|
|||
|
||||
|
||||
def read_journal_range(start_date: str, end_date: str) -> str:
    """Get journal entries between two dates (inclusive) from the store.

    Dates are lexicographically comparable strings (presumably
    YYYY-MM-DD, matching the former ``## <date>`` journal headers —
    confirm against callers). Delegates to the store-backed helper
    instead of parsing the archived journal.md markdown directly.
    """
    # Deferred import: store_helpers shells out to poc-memory, so keep
    # it off the module-import path.
    from store_helpers import get_journal_range
    return get_journal_range(start_date, end_date)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
|
@ -382,47 +384,45 @@ def run_task(task: dict, do_apply: bool) -> dict:
|
|||
result["status"] = "dry_run"
|
||||
return result
|
||||
|
||||
# Apply the content
|
||||
target_path = MEMORY_DIR / task["target"]
|
||||
# Apply the content — write directly to the store
|
||||
target = task["target"]
|
||||
|
||||
if task["action"] == "create":
|
||||
if target_path.exists():
|
||||
print(f" ! Target already exists: {target_path}")
|
||||
result["status"] = "skipped"
|
||||
return result
|
||||
target_path.write_text(content + "\n")
|
||||
print(f" + Created: {target_path} ({result['content_lines']} lines)")
|
||||
# Write each section as a separate node
|
||||
proc = subprocess.run(
|
||||
["poc-memory", "write", target],
|
||||
input=content, capture_output=True, text=True, timeout=30
|
||||
)
|
||||
print(f" + Created in store: {target} ({result['content_lines']} lines)")
|
||||
if proc.stdout.strip():
|
||||
print(f" {proc.stdout.strip()}")
|
||||
result["status"] = "applied"
|
||||
|
||||
elif task["action"] == "append_section":
|
||||
if not target_path.exists():
|
||||
print(f" ! Target doesn't exist: {target_path}")
|
||||
result["status"] = "error"
|
||||
return result
|
||||
existing = target_path.read_text()
|
||||
# Append with separator
|
||||
with open(target_path, "a") as f:
|
||||
f.write("\n\n" + content + "\n")
|
||||
print(f" + Appended to: {target_path} ({result['content_lines']} lines)")
|
||||
# Extract section key from content (## header → slug)
|
||||
header_match = re.match(r'^## (.+)', content)
|
||||
if header_match:
|
||||
slug = re.sub(r'[^a-z0-9-]', '',
|
||||
header_match.group(1).strip().lower().replace(' ', '-'))
|
||||
key = f"{target}#{slug}"
|
||||
else:
|
||||
key = target
|
||||
proc = subprocess.run(
|
||||
["poc-memory", "write", key],
|
||||
input=content, capture_output=True, text=True, timeout=30
|
||||
)
|
||||
print(f" + Appended to store: {key} ({result['content_lines']} lines)")
|
||||
if proc.stdout.strip():
|
||||
print(f" {proc.stdout.strip()}")
|
||||
result["status"] = "applied"
|
||||
|
||||
elif task["action"] == "update":
|
||||
# For updates, we save the proposed changes and let the user review
|
||||
output_path = AGENT_RESULTS_DIR / f"promotion-{task['target']}-{datetime.now().strftime('%Y%m%dT%H%M%S')}.md"
|
||||
output_path.write_text(f"# Proposed update for {task['target']}\n\n{content}\n")
|
||||
# For updates, save proposed changes for review
|
||||
output_path = AGENT_RESULTS_DIR / f"promotion-{target}-{datetime.now().strftime('%Y%m%dT%H%M%S')}.md"
|
||||
output_path.write_text(f"# Proposed update for {target}\n\n{content}\n")
|
||||
print(f" ~ Saved proposed update: {output_path}")
|
||||
result["status"] = "proposed"
|
||||
|
||||
# Register new content with poc-memory
|
||||
if result["status"] == "applied":
|
||||
try:
|
||||
subprocess.run(
|
||||
["poc-memory", "init"],
|
||||
capture_output=True, text=True, timeout=30
|
||||
)
|
||||
except Exception:
|
||||
pass # Non-critical
|
||||
|
||||
return result
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue