Skip to main content
Glama
precompact_extract.py (5.2 kB)
#!/usr/bin/env python3
"""
PreCompact Hook: Extract conversational narrative before compaction.

Reads <session>.jsonl, filters out tool blocks, extracts text content,
and saves to .nisaba/tui/compacted_transcript.md

Exit code 0: Allow CLI's native compaction to proceed.
"""
import json
import sys
from pathlib import Path
from datetime import datetime

# Add src to path for nisaba imports
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))


def extract_transcript(jsonl_path: Path, session_id: str) -> str:
    """
    Extract conversational text from a .jsonl session log.

    Returns a markdown-formatted transcript with:
    - Session ID header (includes source path and extraction timestamp)
    - User messages (text only)
    - Assistant messages (text and thinking blocks; tool_use dropped)

    Returns "" when jsonl_path does not exist.
    """
    if not jsonl_path.exists():
        return ""

    transcript_lines = [
        "---",
        f"Session: {session_id} - {jsonl_path} - {datetime.now().isoformat()}",
        "---",
    ]
    # Track the last emitted role so consecutive messages from the same
    # speaker share one "# ROLE" header.
    last_role = ""

    with open(jsonl_path, 'r') as f:
        for line in f:
            if not line.strip():
                continue
            try:
                entry = json.loads(line)
            except json.JSONDecodeError:
                continue  # Skip corrupted lines

            msg_type = entry.get('type')
            message = entry.get('message', {})
            content = message.get('content', [])
            role = message.get('role', msg_type)

            # Handle user messages (content is a plain string)
            if isinstance(content, str):
                text = content.strip()
                if text:
                    if last_role != role:
                        transcript_lines.append(f"# {role.upper()}")
                        transcript_lines.append("")
                        last_role = role
                    transcript_lines.append(text)
                    transcript_lines.append("")
                continue

            # Handle assistant messages (content is a list of blocks)
            if not isinstance(content, list):
                continue

            # Keep only text and thinking blocks; tool_use/tool_result are dropped
            text_parts = []
            for block in content:
                if not isinstance(block, dict):
                    continue
                block_type = block.get('type')
                if block_type == 'text':
                    text = block.get('text', '').strip()
                    if text:
                        text_parts.append(text)
                elif block_type == 'thinking':
                    # Preserve thinking blocks, wrapped so they stay identifiable
                    thinking = block.get('thinking', '').strip()
                    if thinking:
                        text_parts.append(f"<thinking>\n{thinking}\n</thinking>")

            if text_parts:
                if last_role != role:
                    transcript_lines.append(f"# {role.upper()}")
                    transcript_lines.append("")
                    last_role = role
                for part in text_parts:
                    transcript_lines.append(part)
                    transcript_lines.append("")

    return "\n".join(transcript_lines)


def main():
    """PreCompact hook entry point."""
    try:
        # BUGFIX: imported lazily so a missing/broken nisaba package is caught
        # by this try block (exit 0, compaction proceeds) instead of raising
        # ImportError at module level, which would exit nonzero and violate
        # the "don't block compaction" contract documented below.
        from nisaba.workspace_files import WorkspaceFiles

        session_file = None
        session_id = None

        # Preferred source: hook payload on stdin
        try:
            hook_data = json.load(sys.stdin)
            if 'transcript_path' in hook_data and 'session_id' in hook_data:
                session_file = Path(hook_data['transcript_path'])
                session_id = hook_data['session_id']
        except (json.JSONDecodeError, KeyError, ValueError):
            pass  # Fall back to argv below

        if not session_file and len(sys.argv) > 1:
            session_file = Path(sys.argv[1])

        if not session_file or not session_file.exists():
            print("No session file found - skipping transcript extraction", file=sys.stderr)
            sys.exit(1)

        # BUGFIX: only derive the session ID from the filename
        # (e.g. "049c4152-....jsonl" -> "049c4152-...") when the hook payload
        # did not provide one; the original overwrote it unconditionally.
        if not session_id:
            session_id = session_file.stem

        # Extract transcript for this session
        new_transcript = extract_transcript(session_file, session_id)
        if not new_transcript:
            print("No transcript content extracted", file=sys.stderr)
            sys.exit(0)

        # Accumulate with existing transcript (prepend old history).
        cache = WorkspaceFiles.instance().transcript
        existing = cache.content or ""
        # BUGFIX: don't prepend a blank gap when there is no prior history.
        if existing:
            cache.write(existing + "\n\n" + new_transcript)
        else:
            cache.write(new_transcript)

        print("Transcript saved via WorkspaceFiles singleton", file=sys.stderr)

        # Exit 0: Allow CLI compaction to proceed
        sys.exit(0)

    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        import traceback
        traceback.print_exc(file=sys.stderr)
        # Exit 0 even on error - don't block compaction
        sys.exit(0)


if __name__ == '__main__':
    main()

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/y3i12/nabu_nisaba'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.