Skip to main content
Glama

export_section_packet_v1

Generate JSON packets with evidence, paper matrices, and citations for AI-assisted academic writing and literature review automation.

Instructions

导出章节写作输入包

生成包含所有必要信息的 JSON,供 Agent 写作使用。

Args: pack_id: 证据包 ID

Returns: evidence[], paper_matrix[], claim_matrix[], doc_citations[]

Input Schema

Table / JSON Schema

| Name    | Required | Description      | Default |
| ------- | -------- | ---------------- | ------- |
| pack_id | Yes      | Evidence pack ID | —       |

Implementation Reference

  • Full implementation of the export_section_packet_v1 tool handler. Queries database for pack contents, builds structured output with evidence chunks, paper metadata matrix, claim matrix, and citation info.
    @mcp.tool()
    def export_section_packet_v1(pack_id: int) -> dict[str, Any]:
        """Export a section-writing input packet.

        Builds one JSON-serializable dict containing everything an agent
        needs to draft a section: the raw evidence chunks, a per-paper
        metadata matrix, a claim matrix, and citation entries.

        Args:
            pack_id: Evidence pack ID.

        Returns:
            Dict with ``evidence[]``, ``paper_matrix[]``, ``claim_matrix[]``,
            ``doc_citations[]`` and summary ``stats``; or ``{"error": ...}``
            when the pack is missing or any query fails.
        """
        try:
            # Look up the pack itself; bail out early if it does not exist.
            pack = query_one(
                "SELECT pack_id, query, params_json FROM evidence_packs WHERE pack_id = %s",
                (pack_id,),
            )
            if not pack:
                return {"error": f"Pack not found: {pack_id}"}

            # Fetch all chunk contents for the pack, joined with their
            # source-document metadata, in ranked order.
            chunks = query_all(
                """
                SELECT
                    epi.doc_id,
                    epi.chunk_id,
                    epi.rank,
                    c.text,
                    c.page_start,
                    c.page_end,
                    d.title,
                    d.authors,
                    d.year
                FROM evidence_pack_items epi
                JOIN chunks c ON epi.chunk_id = c.chunk_id
                JOIN documents d ON epi.doc_id = d.doc_id
                WHERE epi.pack_id = %s
                ORDER BY epi.rank
                """,
                (pack_id,),
            )

            # Build the evidence list, collecting the distinct doc ids and
            # the chunk ids for the follow-up queries below.
            evidence = []
            doc_ids = set()
            chunk_ids = []

            for chunk in chunks:
                doc_ids.add(chunk["doc_id"])
                chunk_ids.append(chunk["chunk_id"])
                evidence.append({
                    "doc_id": chunk["doc_id"],
                    "chunk_id": chunk["chunk_id"],
                    "page_start": chunk["page_start"],
                    "page_end": chunk["page_end"],
                    "text": chunk["text"],
                    "title": chunk["title"],
                    "authors": chunk["authors"],
                    "year": chunk["year"],
                    "citation_anchor": f"[[chunk:{chunk['chunk_id']}]]",
                })

            doc_ids_list = list(doc_ids)

            # Build paper_matrix. Batch the document and entity lookups with
            # `= ANY(%s)` (same idiom used for claims below) instead of
            # issuing two queries per document (N+1 fix).
            paper_matrix = []
            if doc_ids_list:
                docs = query_all(
                    "SELECT doc_id, title, authors, year FROM documents WHERE doc_id = ANY(%s)",
                    (doc_ids_list,),
                )
                docs_by_id = {doc["doc_id"]: doc for doc in docs}

                # One pass over all mentioned entities, grouped per document
                # and entity type.
                entity_rows = query_all(
                    """
                    SELECT DISTINCT m.doc_id, e.type, e.canonical_name
                    FROM entities e
                    JOIN mentions m ON e.entity_id = m.entity_id
                    WHERE m.doc_id = ANY(%s)
                    """,
                    (doc_ids_list,),
                )
                entities_by_doc: dict[Any, dict[str, list[str]]] = defaultdict(
                    lambda: defaultdict(list)
                )
                for ent in entity_rows:
                    entities_by_doc[ent["doc_id"]][ent["type"]].append(
                        ent["canonical_name"]
                    )

                for doc_id in doc_ids_list:
                    doc = docs_by_id.get(doc_id)
                    if not doc:
                        continue
                    entity_by_type = entities_by_doc[doc_id]
                    paper_matrix.append({
                        "doc_id": doc_id,
                        "title": doc["title"],
                        "authors": doc["authors"],
                        "year": doc["year"],
                        "topics": entity_by_type.get("Topic", []),
                        "measures": entity_by_type.get("MeasureProxy", []),
                        "identification_strategies": entity_by_type.get("IdentificationStrategy", []),
                        "methods": entity_by_type.get("Method", []),
                        "settings": entity_by_type.get("Setting", []),
                        "limitations": entity_by_type.get("LimitationGap", []),
                    })

            # Build claim_matrix from all claims attached to the pack's chunks.
            claim_matrix = []
            if chunk_ids:
                claims = query_all(
                    """
                    SELECT claim_id, doc_id, chunk_id, claim_text, sign, conditions, confidence
                    FROM claims
                    WHERE chunk_id = ANY(%s)
                    ORDER BY confidence DESC
                    """,
                    (chunk_ids,),
                )
                for claim in claims:
                    claim_matrix.append({
                        "claim_id": claim["claim_id"],
                        "doc_id": claim["doc_id"],
                        "chunk_id": claim["chunk_id"],
                        "claim_text": claim["claim_text"],
                        "sign": claim["sign"],
                        # conditions may be NULL in the DB; normalize to {}.
                        "conditions": claim["conditions"] or {},
                        "confidence": claim["confidence"],
                        "citation_anchor": f"[[chunk:{claim['chunk_id']}]]",
                    })

            # Fetch citation info for every document in one batched query
            # (was one round-trip per document). LEFT JOIN keeps documents
            # without a citations row (bibtex/apa come back as NULL).
            doc_citations = []
            if doc_ids_list:
                citation_rows = query_all(
                    """
                    SELECT d.doc_id, d.title, d.authors, d.year,
                           c.bibtex, c.apa
                    FROM documents d
                    LEFT JOIN citations c ON d.doc_id = c.doc_id
                    WHERE d.doc_id = ANY(%s)
                    """,
                    (doc_ids_list,),
                )
                # Keep the first row per doc_id, mirroring the old
                # query_one-per-document behavior.
                citation_by_id: dict[Any, Any] = {}
                for row in citation_rows:
                    citation_by_id.setdefault(row["doc_id"], row)

                for doc_id in doc_ids_list:
                    citation = citation_by_id.get(doc_id)
                    if citation:
                        doc_citations.append({
                            "doc_id": citation["doc_id"],
                            "title": citation["title"],
                            "authors": citation["authors"],
                            "year": citation["year"],
                            "bibtex": citation["bibtex"],
                            "apa": citation["apa"],
                        })

            return {
                "pack_id": pack_id,
                "query": pack["query"],
                "evidence": evidence,
                "paper_matrix": paper_matrix,
                "claim_matrix": claim_matrix,
                "doc_citations": doc_citations,
                "stats": {
                    "total_chunks": len(evidence),
                    "unique_docs": len(doc_ids_list),
                    "total_claims": len(claim_matrix),
                },
            }

        except Exception as e:
            # MCP tool convention in this file: surface failures as an
            # error payload rather than raising to the transport layer.
            return {"error": str(e)}
  • Imports and calls register_review_tools(mcp) to register the review tools including export_section_packet_v1 in the main MCP server.
    from paperlib_mcp.tools.review import register_review_tools
    
    # M4 canonicalization & grouping tools.
    # NOTE(review): these are imported here but their register_* calls are
    # not visible in this excerpt — presumably registered further below.
    from paperlib_mcp.tools.graph_relation_canonicalize import register_graph_relation_canonicalize_tools
    from paperlib_mcp.tools.graph_claim_grouping import register_graph_claim_grouping_tools
    from paperlib_mcp.tools.graph_v12 import register_graph_v12_tools
    
    # Core tool registration (health, import, search, fetch, writing).
    register_health_tools(mcp)
    register_import_tools(mcp)
    register_search_tools(mcp)
    register_fetch_tools(mcp)
    register_writing_tools(mcp)
    
    # Register M2 GraphRAG tools.
    register_graph_extract_tools(mcp)
    register_graph_canonicalize_tools(mcp)
    register_graph_community_tools(mcp)
    register_graph_summarize_tools(mcp)
    register_graph_maintenance_tools(mcp)
    
    # Register M3 review tools (includes export_section_packet_v1).
    register_review_tools(mcp)

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/h-lu/paperlib-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server