open_threads
Track and manage all open questions across multiple domains to maintain an organized 'unfinished business' dashboard for AI agents.
Instructions
Global inventory of ALL open questions across ALL domains. The 'unfinished business' dashboard.
Input Schema
Input parameters (JSON Schema), summarized as a table:
| Name | Required | Description | Default |
|---|---|---|---|
| limit_per_domain | No | Maximum number of open questions listed under each domain/topic heading. | 5 |
| max_domains | No | Maximum number of domain/topic sections included in the report. | 20 |
Implementation Reference
- The implementation of the `open_threads` tool, which retrieves and organizes open questions across domains from the database, or falls back to raw conversation data if necessary.
def open_threads(limit_per_domain: int = 5, max_domains: int = 20) -> str:
    """Global inventory of ALL open questions across ALL domains. The 'unfinished business' dashboard.

    Args:
        limit_per_domain: Maximum number of questions listed under each
            domain/topic heading.
        max_domains: Maximum number of domain/topic sections rendered.

    Returns:
        Markdown report of open questions, grouped by primary domain when the
        summaries DB is available, otherwise grouped by conversation title
        from the raw conversations (fallback).
    """
    db = get_summaries_db()
    if db:
        return _open_threads_from_summaries(db, limit_per_domain, max_domains)
    return _open_threads_from_conversations(limit_per_domain, max_domains)


def _open_threads_from_summaries(db, limit_per_domain: int, max_domains: int) -> str:
    """Build the dashboard from the summaries DB, grouped by primary domain."""
    rows = db.execute("""
        SELECT domain_primary, open_questions, importance, thinking_stage
        FROM summaries
        WHERE domain_primary != '' AND domain_primary IS NOT NULL
    """).fetchall()
    if not rows:
        return "No data found."

    # Aggregate per-domain: conversation count, breakthrough count, and an
    # order-preserving de-duplicated list of open questions.
    domain_data: dict = {}
    for domain, oq_str, importance, stage in rows:
        data = domain_data.setdefault(
            domain, {"questions": [], "count": 0, "bt": 0, "stage": stage}
        )
        data["count"] += 1
        if importance == "breakthrough":
            data["bt"] += 1
        for q in parse_json_field(oq_str):
            # Skip blanks and the "none identified" placeholder; de-dupe.
            if q and q.lower() != "none identified" and q not in data["questions"]:
                data["questions"].append(q)

    # Only domains that actually have open questions; rank by breakthrough
    # count first, then by how many questions are open.
    active = [(d, v) for d, v in domain_data.items() if v["questions"]]
    active.sort(key=lambda item: (-item[1]["bt"], -len(item[1]["questions"])))
    total_q = sum(len(v["questions"]) for _, v in active)

    # NOTE(review): emoji below restore mis-encoded literals in the original
    # ("π§΅" -> 🧵, "πΓ" -> 🔥×, "β" -> •) — byte patterns match an
    # ISO-8859-7 misdecode of the UTF-8 characters.
    output = ["## 🧵 Open Threads\n"]
    output.append(f"**{total_q} open questions** across **{len(active)} domains**\n")
    for domain, data in active[:max_domains]:
        bt = f" 🔥×{data['bt']}" if data["bt"] else ""
        output.append(
            f"### {domain}{bt} ({len(data['questions'])} questions, {data['count']} convos)"
        )
        for q in data["questions"][:limit_per_domain]:
            output.append(f" • {q}")
        if len(data["questions"]) > limit_per_domain:
            output.append(f" _... and {len(data['questions'])-limit_per_domain} more_")
        output.append("")
    return "\n".join(output)


def _open_threads_from_conversations(limit_per_domain: int, max_domains: int) -> str:
    """Fallback: list recent user questions straight from the raw conversations DB."""
    try:
        con = get_conversations()
    except FileNotFoundError:
        return "No conversation data found. Run the ingest pipeline first."

    rows = con.execute("""
        SELECT conversation_title, content, created
        FROM conversations
        WHERE role = 'user' AND has_question = 1
        ORDER BY created DESC
        LIMIT 50
    """).fetchall()
    if not rows:
        return "No questions found in conversations."

    # Group by conversation title (insertion order == recency order).
    topic_questions: dict = {}
    for title, content, created in rows:
        topic_questions.setdefault(title or "Untitled", []).append((content, created))

    total_q = sum(len(qs) for qs in topic_questions.values())
    output = ["## 🧵 Open Threads (from raw conversations)\n"]
    output.append(f"**{total_q} recent questions** across **{len(topic_questions)} topics**\n")
    shown = 0
    for topic, questions in topic_questions.items():
        if shown >= max_domains:
            break
        output.append(f"### {topic} ({len(questions)} questions)")
        for content, created in questions[:limit_per_domain]:
            # Single-line snippet so the markdown list stays intact.
            snippet = (content or "")[:200].replace("\n", " ")
            output.append(f" • {snippet}")
        if len(questions) > limit_per_domain:
            output.append(f" _... and {len(questions)-limit_per_domain} more_")
        output.append("")
        shown += 1
    output.append(_SUMMARIES_HINT)
    return "\n".join(output)