Skip to main content
Glama

what_do_i_think

Analyze topics to synthesize comprehensive perspectives or find similar past situations for informed decision-making.

Instructions

    Synthesize what you think about a topic, or find similar past situations.

    Args:
        topic: The topic or situation to analyze
        mode: Analysis mode:
            - "synthesize" (default): Full synthesis with decisions, open questions, quotes
            - "precedent": Find similar past situations with context and decisions made
    

Input Schema

Table (JSON Schema)

| Name  | Required | Description                       | Default    |
|-------|----------|-----------------------------------|------------|
| topic | Yes      | The topic or situation to analyze |            |
| mode  | No       | Analysis mode                     | synthesize |

Implementation Reference

  • The main handler for the 'what_do_i_think' tool, which performs synthesis based on a given topic, including precedent search or full synthesis using embedded summaries.
    def what_do_i_think(topic: str, mode: str = "synthesize") -> str:
        """
        Synthesize what you think about a topic, or find similar past situations.

        Args:
            topic: The topic or situation to analyze
            mode: Analysis mode:
                - "synthesize" (default): Full synthesis with decisions, open questions, quotes
                - "precedent": Find similar past situations with context and decisions made
        """
        # Precedent mode is handled entirely by the dedicated helper.
        if mode == "precedent":
            return _find_precedent(topic)

        output = [f"## What do I think about: {topic}\n"]

        embedding = get_embedding(f"search_query: {topic}")
        if not embedding:
            return "Could not generate embedding for topic."

        lance = get_summaries_lance()
        if not lance:
            # No structured summary store available — synthesize from raw messages.
            return _what_do_i_think_raw(topic, embedding, output)

        try:
            table = lance.open_table(SUMMARIES_TABLE)
            rows = table.search(embedding).limit(20).to_list()
        except Exception as e:
            return f"Summary search error: {e}"

        if not rows:
            output.append("_No structured thoughts found on this topic._")
            return "\n".join(output)

        # Surface the most important material first: breakthrough > significant > routine.
        rank = {"breakthrough": 0, "significant": 1, "routine": 2}
        rows.sort(key=lambda row: rank.get(row.get("importance", "routine"), 2))

        decisions = []
        open_questions = []
        quotables = []
        shown = 0

        output.append("### Summary of Thinking\n")
        for row in rows[:10]:
            title = row.get("title", "Untitled") or "Untitled"
            summary = row.get("summary", "")
            importance = row.get("importance", "?")
            domain = row.get("domain_primary", "?")
            stage = row.get("thinking_stage", "?")
            conv_id = row.get("conversation_id", "?")

            # Show at most 5 full summary entries; the rest still contribute
            # decisions / questions / quotes below.
            if summary and shown < 5:
                icon = {"breakthrough": "πŸ”₯", "significant": "⭐", "routine": "πŸ“"}.get(importance, "πŸ“")
                snippet = summary[:300] + ("..." if len(summary) > 300 else "")
                output.append(f"{icon} **{title}** [{domain} | {stage}]")
                output.append(f"> {snippet}")
                output.append(f"_ID: {conv_id[:20]}..._\n")
                shown += 1

            # Collect structured fields, skipping placeholder "none identified" entries.
            for item in parse_json_field(row.get("decisions")):
                if item and "none identified" not in str(item).lower():
                    decisions.append((item, title, conv_id))

            for item in parse_json_field(row.get("open_questions")):
                if item and "none identified" not in str(item).lower():
                    open_questions.append((item, title, conv_id))

            for item in parse_json_field(row.get("quotable")):
                if item and "none identified" not in str(item).lower():
                    quotables.append((item, title))

        if decisions:
            output.append("### Key Decisions\n")
            emitted = set()
            # Dedupe on the lowercased first 80 chars of each decision.
            for text, title, _ in decisions[:10]:
                key = text[:80].lower()
                if key in emitted:
                    continue
                emitted.add(key)
                output.append(f"- {text[:200]}")
                output.append(f"  _From: {title}_")

        if open_questions:
            output.append("\n### Still Open\n")
            emitted = set()
            for text, title, _ in open_questions[:8]:
                key = text[:80].lower()
                if key in emitted:
                    continue
                emitted.add(key)
                output.append(f"- {text[:200]}")
                output.append(f"  _From: {title}_")

        if quotables:
            output.append("\n### Authentic Quotes\n")
            for text, title in quotables[:5]:
                output.append(f"> \"{text[:250]}\"")
                output.append(f"> β€” _{title}_\n")

        return "\n".join(output)
  • Tool registration using the MCP decorator.
    @mcp.tool()
    def what_do_i_think(topic: str, mode: str = "synthesize") -> str:
  • Fallback handler '_what_do_i_think_raw' used when structured summaries are unavailable.
    def _what_do_i_think_raw(topic: str, embedding: list[float], output: list[str]) -> str:
        """Fallback synthesis from raw messages when summaries aren't available."""
        cfg = get_config()

        # Vector-similarity hits from the raw-message index, if one exists on disk.
        semantic_hits = []
        if cfg.lance_path.exists():
            semantic_hits = lance_search(embedding, limit=20, min_sim=0.3)

        # Literal keyword hits in the user's own messages (best effort — any DB
        # error leaves the list empty rather than failing the whole synthesis).
        keyword_hits = []
        try:
            con = get_conversations()
            keyword_hits = con.execute("""
                SELECT conversation_title, substr(content, 1, 300) as preview,
                       created, conversation_id
                FROM conversations
                WHERE content ILIKE ? AND role = 'user'
                ORDER BY created DESC
                LIMIT 10
            """, [f"%{topic}%"]).fetchall()
        except Exception:
            pass

        if not semantic_hits and not keyword_hits:
            output.append("_No thoughts found on this topic._")
            return "\n".join(output)

        if semantic_hits:
            output.append("### Semantically Related Thoughts\n")
            for title, content, year, month, sim in semantic_hits[:10]:
                snippet = content[:250] + "..." if len(content) > 250 else content
                output.append(f"**[{year}-{month:02d}]** {title or 'Untitled'} (sim: {sim:.2f})")
                output.append(f"> {snippet}\n")

        if keyword_hits:
            output.append("### Direct Mentions\n")
            already = set()
            # One entry per conversation, newest first (query is ordered by created DESC).
            for title, preview, created, conv_id in keyword_hits:
                if conv_id in already:
                    continue
                already.add(conv_id)
                date_str = str(created)[:10]
                output.append(f"**[{date_str}]** {title or 'Untitled'}")
                output.append(f"> {preview}{'...' if len(preview) >= 300 else ''}\n")

        output.append("---")
        output.append("_Running without summaries. For richer synthesis with "
                       "decisions, open questions, and quotes: `brain-mcp summarize`_")
        return "\n".join(output)

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/mordechaipotash/brain-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server