Skip to main content
Glama

trust_dashboard

Monitor system-wide statistics to verify prosthetic functionality by displaying preserved conversations, domains, questions, and decisions for operational assurance.

Instructions

    System-wide stats proving the prosthetic works.
    Shows everything that's preserved: conversations, domains, questions, decisions.
    The 'everything is okay' view.
    

Input Schema

Table | JSON Schema
Name | Required | Description | Default

No arguments

Implementation Reference

  • The implementation of the 'trust_dashboard' tool within the Cognitive Prosthetic toolset. It queries the summaries database to provide a status overview, or falls back to raw conversation data if the summaries database is unavailable.
    def trust_dashboard() -> str:
        """
        System-wide stats proving the prosthetic works.
        Shows everything that's preserved: conversations, domains, questions, decisions.
        The 'everything is okay' view.

        Returns:
            Markdown dashboard text. Prefers the summaries database; falls
            back to raw conversation + vector stats when summaries are
            unavailable, or an instructional message when no data exists.
        """
        db = get_summaries_db()
        if db:
            # Headline counts in a single aggregate query.
            stats = db.execute("""
                SELECT COUNT(*) as total,
                       COUNT(DISTINCT domain_primary) as domains,
                       COUNT(CASE WHEN importance = 'breakthrough' THEN 1 END) as breakthroughs
                FROM summaries
            """).fetchone()
            total, domains, breakthroughs = stats

            # One pass over all summaries: tally open questions and decisions,
            # and record which domains still carry real open questions.
            # (Was two separate table scans that parsed the open_questions
            # JSON twice; merged for a single scan with identical results.)
            total_oq, total_dec = 0, 0
            oq_domains = set()
            rows = db.execute(
                "SELECT domain_primary, open_questions, decisions FROM summaries"
            ).fetchall()
            for domain, oq_str, dec_str in rows:
                oqs = parse_json_field(oq_str)
                n_real = sum(1 for q in oqs if q and q.lower() != "none identified")
                total_oq += n_real
                if n_real:
                    oq_domains.add(domain)
                total_dec += len(parse_json_field(dec_str))

            sources = db.execute("""
                SELECT source, COUNT(*) as count FROM summaries GROUP BY source ORDER BY count DESC
            """).fetchall()

            # Per-domain rollup; blank/NULL domains are excluded from coverage.
            domain_rows = db.execute("""
                SELECT domain_primary, COUNT(*) as count,
                       MAX(thinking_stage) as stage,
                       COUNT(CASE WHEN importance='breakthrough' THEN 1 END) as bt
                FROM summaries
                WHERE domain_primary != '' AND domain_primary IS NOT NULL
                GROUP BY domain_primary ORDER BY count DESC
            """).fetchall()

            output = ["## πŸ›‘οΈ Trust Dashboard\n"]
            output.append("_Your cognitive safety net β€” proof that nothing is lost_\n")
            output.append("### πŸ“Š Global Metrics")
            output.append(f"  **Conversations indexed:** {total:,}")
            output.append(f"  **Domains tracked:** {domains}")
            output.append(f"  **Open questions preserved:** {total_oq:,}")
            output.append(f"  **Decisions preserved:** {total_dec:,}")
            output.append(f"  **Breakthroughs captured:** {breakthroughs} πŸ’Ž")
            output.append(f"  **Domains with active threads:** {len(oq_domains)}")

            output.append("\n### πŸ“‘ Sources")
            for src, count in sources:
                output.append(f"  {src}: {count:,}")

            output.append("\n### πŸ—ΊοΈ Domain Coverage (top 15)")
            for domain, count, stage, bt in domain_rows[:15]:
                bt_mark = f" πŸ’ŽΓ—{bt}" if bt else ""
                has_oq = " πŸ”΄" if domain in oq_domains else " βœ…"
                output.append(f"  {domain}: {count} convos ({stage or '?'}){bt_mark}{has_oq}")

            # Traffic-light thresholds below are fixed heuristics, not config.
            output.append("\n### πŸ”‘ Safety Net Status")
            output.append(f"  {'🟒' if total > 5000 else '🟑'} Coverage: {total:,} conversations")
            output.append(f"  {'🟒' if domains > 15 else '🟑'} Breadth: {domains} domains")
            output.append(f"  {'🟒' if breakthroughs > 10 else '🟑'} Depth: {breakthroughs} breakthroughs captured")
            output.append(f"  {'πŸ”΄' if len(oq_domains) > 10 else '🟒'} Open threads: {len(oq_domains)} domains need attention")

            return "\n".join(output)

        # ── Fallback: stats from raw conversations + vectors ──
        try:
            con = get_conversations()
        except FileNotFoundError:
            return "No conversation data found. Run the ingest pipeline first."

        stats = con.execute("""
            SELECT COUNT(*) as total,
                   COUNT(DISTINCT conversation_title) as convos,
                   COUNT(DISTINCT source) as sources,
                   MIN(created) as first_msg,
                   MAX(created) as last_msg,
                   SUM(CASE WHEN has_question = 1 AND role = 'user' THEN 1 ELSE 0 END) as questions
            FROM conversations
        """).fetchone()
        total, convos, n_sources, first_msg, last_msg, total_qs = stats
        # SUM() yields NULL (Python None) when the table is empty, which would
        # crash the ',' format spec below β€” coerce to 0.
        total_qs = total_qs or 0

        source_rows = con.execute("""
            SELECT source, COUNT(*) as count
            FROM conversations
            GROUP BY source
            ORDER BY count DESC
        """).fetchall()

        vec_count = lance_count()

        output = ["## πŸ›‘οΈ Trust Dashboard\n"]
        output.append("_Your cognitive safety net β€” what's preserved_\n")

        output.append("### πŸ“Š Data Inventory")
        output.append(f"  **Total messages:** {total:,}")
        output.append(f"  **Conversations:** {convos:,}")
        output.append(f"  **Questions captured:** {total_qs:,}")
        output.append(f"  **Vectors indexed:** {vec_count:,}")
        # NOTE(review): on an empty table MIN/MAX are None and this renders
        # "None to None" β€” non-crashing, but confirm that's acceptable.
        output.append(f"  **Date range:** {str(first_msg)[:10]} to {str(last_msg)[:10]}")

        if source_rows:
            output.append("\n### πŸ“‘ Sources")
            for src, count in source_rows:
                output.append(f"  {src}: {count:,}")

        output.append("\n### πŸ”‘ Pipeline Status")
        output.append(f"  🟒 Conversations: {total:,} messages ingested")
        output.append(f"  {'🟒' if vec_count > 0 else 'πŸ”΄'} Vectors: {vec_count:,} embeddings indexed")
        output.append("  πŸ”΄ Summaries: not generated")
        output.append("\n### What's available without summaries")
        output.append("  - Keyword search across all conversations")
        output.append("  - Semantic (vector) search")
        output.append("  - Basic conversation stats and history")
        output.append("  - Approximate open threads and switching costs")
        output.append("\n### What summaries add")
        output.append("  - Thinking stages, cognitive patterns, emotional tones")
        output.append("  - Structured open questions and decisions")
        output.append("  - Domain classification and breakthrough detection")
        output.append("  - Key insights and quotable moments")
        output.append(_SUMMARIES_HINT)
        return "\n".join(output)
  • Registration entry for 'trust_dashboard' in the dashboard tools registry, including metadata like description, category, and requirements.
        "name": "trust_dashboard",
        "description": "System-wide proof the safety net works",
        "category": "Cognitive Prosthetic",
        "requires": ["conversations", "summaries"],
        "params": {},
        "probe": {},
    },

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/mordechaipotash/brain-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server