scripts: use capnp store instead of reading markdown directly

Add store_helpers.py with shared helpers that call poc-memory commands
(list-keys, render, journal-tail) instead of globbing ~/.claude/memory/*.md
and parsing section headers.

All 9 Python scripts updated: get_semantic_keys(), get_topic_file_index(),
get_recent_journal(), parse_journal_entries(), read_journal_range(),
collect_topic_stems(), and file preview rendering now go through the store.

This completes the clean switch — no script reads archived markdown files.
This commit is contained in:
ProofOfConcept 2026-02-28 23:32:47 -05:00
parent f20ea4f827
commit d14710e477
10 changed files with 324 additions and 297 deletions

View file

@ -64,88 +64,36 @@ def call_sonnet(prompt: str, timeout: int = 600) -> str:
# ---------------------------------------------------------------------------
def get_recent_journal(n_lines: int = 200) -> str:
    """Get recent journal entries from the store.

    Delegates to store_helpers instead of reading journal.md directly.
    The `n_lines` parameter is kept for backward compatibility with
    callers; it is converted to an approximate entry count.

    Args:
        n_lines: Legacy line-count hint; mapped to ~n_lines/4 entries.

    Returns:
        Rendered journal text from the store ("" if the store is empty).
    """
    # Local import keeps store_helpers optional until this path is used.
    from store_helpers import get_recent_journal as _get_journal
    # n_lines ≈ 50 entries (rough heuristic: ~4 lines per entry);
    # floor of 20 entries so small requests still return useful context.
    return _get_journal(n=max(20, n_lines // 4))
def get_topic_file_index() -> dict[str, list[str]]:
    """Build index of topic files and their section headers from the store.

    Delegates to store_helpers rather than globbing ~/.claude/memory/*.md
    and parsing "## " headers by hand.

    Returns:
        Mapping of topic file name -> list of section header strings.
    """
    # Local import keeps store_helpers optional until this path is used.
    from store_helpers import get_topic_file_index as _get_index
    return _get_index()
def get_mem_markers() -> list[dict]:
    """Get relations from the store (replaces mem marker parsing).

    Previously this scanned memory markdown files for <!-- mem: --> comment
    markers; it now asks the store for its relation edges and wraps each
    non-empty output line in a marker-like dict so existing callers that
    iterate over dicts keep working.

    Returns:
        List of dicts of the form {"_raw": <edge line>}, one per
        non-blank line of the store's list-edges output.
    """
    from store_helpers import get_relations

    raw = get_relations()
    # Parse list-edges output into marker-like dicts: one dict per
    # non-empty line, keyed "_raw" for downstream formatting.
    markers = []
    for line in raw.split('\n'):
        line = line.strip()
        if not line:
            continue
        markers.append({"_raw": line})
    return markers
def get_topic_summaries(max_chars_per_file: int = 500) -> str:
    """Get topic file summaries from the store.

    Delegates to store_helpers instead of reading and truncating the
    markdown topic files directly.

    Args:
        max_chars_per_file: Per-file character budget, forwarded to the
            store helper (default 500, unchanged from the old reader).

    Returns:
        Concatenated summary text for all topic files.
    """
    # Local import keeps store_helpers optional until this path is used.
    from store_helpers import get_topic_summaries as _get_summaries
    return _get_summaries(max_chars_per_file)
def get_graph_stats() -> str:
@ -274,16 +222,13 @@ def build_crosslink_prompt() -> str:
marker_text = ""
for m in markers:
f = m.get('_file', '?')
mid = m.get('id', '?')
links = m.get('links', '')
marker_text += f" {f}#{mid} → links={links}\n"
marker_text += f" {m.get('_raw', '?')}\n"
return f"""You are the Cross-Link Scanner for ProofOfConcept's memory system.
Your job: find MISSING connections between topic files.
## Existing links (from <!-- mem: --> markers)
## Existing relations (from the memory graph)
{marker_text}
@ -328,15 +273,13 @@ def build_topology_prompt() -> str:
stats = get_graph_stats()
topic_index = get_topic_file_index()
# Get node counts per file from the store
from store_helpers import get_topic_file_index as _get_index
topic_index = _get_index()
file_sizes = ""
for md in sorted(MEMORY_DIR.glob("*.md")):
if md.name in ("journal.md", "MEMORY.md"):
continue
try:
lines = len(md.read_text().split('\n'))
file_sizes += f" {md.name}: {lines} lines\n"
except Exception:
pass
for fname in sorted(topic_index.keys()):
n_sections = len(topic_index[fname])
file_sizes += f" {fname}: {n_sections} sections\n"
return f"""You are the Topology Reporter for ProofOfConcept's memory system.