trust_dashboard
Monitor system-wide statistics to verify prosthetic functionality by displaying preserved conversations, domains, questions, and decisions for operational assurance.
Instructions
System-wide stats proving the prosthetic works.
Shows everything that's preserved: conversations, domains, questions, decisions.
The 'everything is okay' view.
Input Schema
Table / JSON Schema
| Name | Required | Description | Default |
|---|---|---|---|
| No arguments | | | |
Implementation Reference
- The implementation of the 'trust_dashboard' tool within the Cognitive Prosthetic toolset. It queries the summaries database to provide a status overview, or falls back to raw conversation data if the summaries database is unavailable.
def trust_dashboard() -> str: """ System-wide stats proving the prosthetic works. Shows everything that's preserved: conversations, domains, questions, decisions. The 'everything is okay' view. """ db = get_summaries_db() if db: stats = db.execute(""" SELECT COUNT(*) as total, COUNT(DISTINCT domain_primary) as domains, COUNT(CASE WHEN importance = 'breakthrough' THEN 1 END) as breakthroughs FROM summaries """).fetchone() total, domains, breakthroughs = stats rows = db.execute("SELECT open_questions, decisions FROM summaries").fetchall() total_oq, total_dec = 0, 0 for oq_str, dec_str in rows: oqs = parse_json_field(oq_str) total_oq += sum(1 for q in oqs if q and q.lower() != "none identified") total_dec += len(parse_json_field(dec_str)) sources = db.execute(""" SELECT source, COUNT(*) as count FROM summaries GROUP BY source ORDER BY count DESC """).fetchall() domain_rows = db.execute(""" SELECT domain_primary, COUNT(*) as count, MAX(thinking_stage) as stage, COUNT(CASE WHEN importance='breakthrough' THEN 1 END) as bt FROM summaries WHERE domain_primary != '' AND domain_primary IS NOT NULL GROUP BY domain_primary ORDER BY count DESC """).fetchall() oq_domains = set() for row in db.execute("SELECT domain_primary, open_questions FROM summaries").fetchall(): for q in parse_json_field(row[1]): if q and q.lower() != "none identified": oq_domains.add(row[0]) break output = [f"## π‘οΈ Trust Dashboard\n"] output.append(f"_Your cognitive safety net β proof that nothing is lost_\n") output.append(f"### π Global Metrics") output.append(f" **Conversations indexed:** {total:,}") output.append(f" **Domains tracked:** {domains}") output.append(f" **Open questions preserved:** {total_oq:,}") output.append(f" **Decisions preserved:** {total_dec:,}") output.append(f" **Breakthroughs captured:** {breakthroughs} π") output.append(f" **Domains with active threads:** {len(oq_domains)}") output.append(f"\n### π‘ Sources") for src, count in sources: output.append(f" {src}: {count:,}") 
output.append(f"\n### πΊοΈ Domain Coverage (top 15)") for domain, count, stage, bt in domain_rows[:15]: bt_mark = f" πΓ{bt}" if bt else "" has_oq = " π΄" if domain in oq_domains else " β " output.append(f" {domain}: {count} convos ({stage or '?'}){bt_mark}{has_oq}") output.append(f"\n### π Safety Net Status") output.append(f" {'π’' if total > 5000 else 'π‘'} Coverage: {total:,} conversations") output.append(f" {'π’' if domains > 15 else 'π‘'} Breadth: {domains} domains") output.append(f" {'π’' if breakthroughs > 10 else 'π‘'} Depth: {breakthroughs} breakthroughs captured") output.append(f" {'π΄' if len(oq_domains) > 10 else 'π’'} Open threads: {len(oq_domains)} domains need attention") return "\n".join(output) # ββ Fallback: stats from raw conversations + vectors ββ try: con = get_conversations() except FileNotFoundError: return "No conversation data found. Run the ingest pipeline first." stats = con.execute(""" SELECT COUNT(*) as total, COUNT(DISTINCT conversation_title) as convos, COUNT(DISTINCT source) as sources, MIN(created) as first_msg, MAX(created) as last_msg, SUM(CASE WHEN has_question = 1 AND role = 'user' THEN 1 ELSE 0 END) as questions FROM conversations """).fetchone() total, convos, n_sources, first_msg, last_msg, total_qs = stats source_rows = con.execute(""" SELECT source, COUNT(*) as count FROM conversations GROUP BY source ORDER BY count DESC """).fetchall() vec_count = lance_count() output = [f"## π‘οΈ Trust Dashboard\n"] output.append(f"_Your cognitive safety net β what's preserved_\n") output.append(f"### π Data Inventory") output.append(f" **Total messages:** {total:,}") output.append(f" **Conversations:** {convos:,}") output.append(f" **Questions captured:** {total_qs:,}") output.append(f" **Vectors indexed:** {vec_count:,}") output.append(f" **Date range:** {str(first_msg)[:10]} to {str(last_msg)[:10]}") if source_rows: output.append(f"\n### π‘ Sources") for src, count in source_rows: output.append(f" {src}: {count:,}") 
output.append(f"\n### π Pipeline Status") output.append(f" π’ Conversations: {total:,} messages ingested") output.append(f" {'π’' if vec_count > 0 else 'π΄'} Vectors: {vec_count:,} embeddings indexed") output.append(f" π΄ Summaries: not generated") output.append(f"\n### What's available without summaries") output.append(f" - Keyword search across all conversations") output.append(f" - Semantic (vector) search") output.append(f" - Basic conversation stats and history") output.append(f" - Approximate open threads and switching costs") output.append(f"\n### What summaries add") output.append(f" - Thinking stages, cognitive patterns, emotional tones") output.append(f" - Structured open questions and decisions") output.append(f" - Domain classification and breakthrough detection") output.append(f" - Key insights and quotable moments") output.append(_SUMMARIES_HINT) return "\n".join(output) - brain_mcp/dashboard/routes/tools.py:85-91 (registration)Registration entry for 'trust_dashboard' in the dashboard tools registry, including metadata like description, category, and requirements.
"name": "trust_dashboard", "description": "System-wide proof the safety net works", "category": "Cognitive Prosthetic", "requires": ["conversations", "summaries"], "params": {}, "probe": {}, },