Skip to main content
Glama

switching_cost

Calculate cognitive switching costs between domains by analyzing open questions and concept overlap to quantify context-switch penalties.

Instructions

    Estimate cognitive cost of switching between domains.
    Factors: open questions left behind, shared concepts (overlap discount).
    Returns 0-1 score where lower = cheaper switch.
    

Input Schema

| Name           | Required | Description | Default |
|----------------|----------|-------------|---------|
| current_domain | Yes      | Domain you are currently working in | — |
| target_domain  | Yes      | Domain you are considering switching to | — |

Implementation Reference

  • The handler function `switching_cost` which estimates the cognitive cost of switching between domains. It utilizes both summary data (from database) and raw conversation statistics (as a fallback).
    def _switch_recommendation(score: float, moderate: str, high: str) -> str:
        """Map a 0-1 switching-cost score onto a one-line recommendation banner.

        The moderate/high message tails differ between the summary and fallback
        paths, so they are passed in by the caller.
        """
        if score < 0.3:
            return "✅ Low cost — go for it"
        if score < 0.6:
            return f"⚠️ Moderate — {moderate}"
        return f"🔴 High cost — {high}"


    def _switching_cost_from_summaries(db, current_domain: str, target_domain: str) -> str:
        """Score a domain switch using the summaries database (preferred path).

        Cost model: 0.35 * open-question abandonment + 0.35 * thinking-stage
        penalty - 0.30 * concept-overlap discount, clamped to [0, 1].
        Returns a markdown report, or a plain message if either domain has no rows.
        """
        cur_rows = db.execute("""
            SELECT open_questions, concepts, thinking_stage
            FROM summaries WHERE domain_primary = ?
        """, [current_domain]).fetchall()

        tgt_rows = db.execute("""
            SELECT open_questions, concepts, thinking_stage
            FROM summaries WHERE domain_primary = ?
        """, [target_domain]).fetchall()

        if not cur_rows:
            return f"No data for current domain: {current_domain}"
        if not tgt_rows:
            return f"No data for target domain: {target_domain}"

        # Collect genuine open questions (filter the "none identified" placeholder)
        # and the concept vocabulary of the current domain.
        cur_oq = set()
        cur_concepts = set()
        for row in cur_rows:
            for q in parse_json_field(row[0]):
                if q and q.lower() != "none identified":
                    cur_oq.add(q)
            cur_concepts.update(parse_json_field(row[1]))
        cur_stage = cur_rows[0][2] or "unknown"

        tgt_concepts = set()
        for row in tgt_rows:
            tgt_concepts.update(parse_json_field(row[1]))
        tgt_stage = tgt_rows[0][2] or "unknown"

        shared = cur_concepts & tgt_concepts
        oq_cost = min(len(cur_oq) / 10.0, 1.0)
        overlap_discount = min(len(shared) / max(len(cur_concepts), 1), 1.0)
        # Later stages are costlier to abandon; unrecognized stages get a middling 0.3.
        stage_cost = {"executing": 0.8, "refining": 0.6, "crystallizing": 0.4, "exploring": 0.2}.get(cur_stage, 0.3)
        score = round((oq_cost * 0.35) + (stage_cost * 0.35) - (overlap_discount * 0.3), 3)
        score = max(0.0, min(1.0, score))

        rec = _switch_recommendation(
            score,
            "consider noting current open questions first",
            "significant unfinished work in current domain",
        )

        output = [f"## 🔀 Switching Cost: {current_domain} → {target_domain}\n"]
        output.append(f"### Score: **{score}** / 1.0  ({rec})\n")
        output.append(f"**Current domain:** {current_domain}")
        output.append(f"  Stage: {cur_stage}")
        output.append(f"  Open questions: {len(cur_oq)}")
        output.append(f"  Concepts: {len(cur_concepts)}")
        output.append(f"\n**Target domain:** {target_domain}")
        output.append(f"  Stage: {tgt_stage}")
        output.append(f"  Conversations: {len(tgt_rows)}")
        output.append(f"  Concepts: {len(tgt_concepts)}")
        output.append(f"\n**Overlap:** {len(shared)} shared concepts")
        if shared:
            output.append(f"  {', '.join(sorted(shared)[:10])}")
        output.append(f"\n**Cost breakdown:**")
        output.append(f"  Abandonment (open Qs): {oq_cost:.2f}")
        output.append(f"  Stage penalty ({cur_stage}): {stage_cost:.2f}")
        output.append(f"  Overlap discount: -{overlap_discount:.2f}")
        if cur_oq:
            output.append(f"\n**Questions you'd leave behind:**")
            for q in list(cur_oq)[:5]:
                output.append(f"  ❓ {q}")

        return "\n".join(output)


    def _switching_cost_from_conversations(con, current_domain: str, target_domain: str) -> str:
        """Score a domain switch from raw conversations (fallback when no summaries DB).

        Rough heuristic: unanswered user questions = attachment, message volume =
        momentum, shared conversation titles = overlap discount.
        """
        cur_pattern = f"%{current_domain}%"
        tgt_pattern = f"%{target_domain}%"

        cur_stats = con.execute("""
            SELECT COUNT(*) as msgs,
                   SUM(CASE WHEN has_question = 1 AND role = 'user' THEN 1 ELSE 0 END) as questions,
                   MAX(created) as last_active
            FROM conversations
            WHERE content ILIKE ? OR conversation_title ILIKE ?
        """, [cur_pattern, cur_pattern]).fetchone()

        tgt_stats = con.execute("""
            SELECT COUNT(*) as msgs,
                   SUM(CASE WHEN has_question = 1 AND role = 'user' THEN 1 ELSE 0 END) as questions,
                   MAX(created) as last_active
            FROM conversations
            WHERE content ILIKE ? OR conversation_title ILIKE ?
        """, [tgt_pattern, tgt_pattern]).fetchone()

        cur_msgs, cur_qs, cur_last = cur_stats or (0, 0, None)
        tgt_msgs, tgt_qs, tgt_last = tgt_stats or (0, 0, None)

        if not cur_msgs:
            return f"No data for current domain: {current_domain}"
        if not tgt_msgs:
            return f"No data for target domain: {target_domain}"

        # Count conversation titles that match both domains' patterns.
        shared_titles = con.execute("""
            SELECT COUNT(DISTINCT conversation_title) FROM conversations
            WHERE (content ILIKE ? OR conversation_title ILIKE ?)
              AND conversation_title IN (
                  SELECT DISTINCT conversation_title FROM conversations
                  WHERE content ILIKE ? OR conversation_title ILIKE ?
              )
        """, [cur_pattern, cur_pattern, tgt_pattern, tgt_pattern]).fetchone()[0]

        # SUM(...) yields NULL (None) when no row satisfies the CASE; normalize
        # BOTH sides so arithmetic and display never see None.
        cur_qs = cur_qs or 0
        tgt_qs = tgt_qs or 0
        question_cost = min(cur_qs / 10.0, 1.0)
        volume_cost = min(cur_msgs / 100.0, 0.5)
        overlap_discount = min(shared_titles / max(cur_msgs / 10, 1), 1.0)
        score = round((question_cost * 0.4) + (volume_cost * 0.3) - (overlap_discount * 0.3), 3)
        score = max(0.0, min(1.0, score))

        rec = _switch_recommendation(
            score,
            "consider noting current questions first",
            "significant activity in current domain",
        )

        output = [f"## 🔀 Switching Cost: {current_domain} → {target_domain} (estimated)\n"]
        output.append(f"### Score: **{score}** / 1.0  ({rec})\n")
        output.append(f"**Current domain:** {current_domain}")
        output.append(f"  Messages: {cur_msgs}")
        output.append(f"  Questions: {cur_qs}")
        output.append(f"  Last active: {str(cur_last)[:10] if cur_last else 'unknown'}")
        output.append(f"\n**Target domain:** {target_domain}")
        output.append(f"  Messages: {tgt_msgs}")
        output.append(f"  Questions: {tgt_qs}")
        output.append(f"  Last active: {str(tgt_last)[:10] if tgt_last else 'unknown'}")
        output.append(f"\n**Shared conversations:** {shared_titles}")
        output.append(f"\n**Cost breakdown (approximate):**")
        output.append(f"  Question attachment: {question_cost:.2f}")
        output.append(f"  Volume penalty: {volume_cost:.2f}")
        output.append(f"  Overlap discount: -{overlap_discount:.2f}")
        output.append(_SUMMARIES_HINT)
        return "\n".join(output)


    def switching_cost(current_domain: str, target_domain: str) -> str:
        """
        Estimate cognitive cost of switching between domains.
        Factors: open questions left behind, shared concepts (overlap discount).
        Returns 0-1 score where lower = cheaper switch.

        Prefers the summaries database; falls back to raw conversation
        statistics when no summaries DB is available.  Returns a markdown
        report, or a plain message when required data is missing.
        """
        db = get_summaries_db()
        if db:
            return _switching_cost_from_summaries(db, current_domain, target_domain)

        # ── Fallback: raw conversations ──
        try:
            con = get_conversations()
        except FileNotFoundError:
            return "No conversation data found. Run the ingest pipeline first."
        return _switching_cost_from_conversations(con, current_domain, target_domain)
  • Tool definition and registration of 'switching_cost' in the TOOLS list within the dashboard routes.
        "name": "switching_cost",
        "description": "Quantified penalty for switching between domains",
        "category": "Cognitive Prosthetic",
        "requires": ["summaries"],
        "params": {"current_domain": "str", "target_domain": "str"},
        "probe": {"current_domain": "ai-dev", "target_domain": "frontend-dev"},
    },

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/mordechaipotash/brain-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server