Skip to main content
Glama

workflowy_scry

Load complete WorkFlowy node trees via API to access full hierarchical data when parent UUIDs are missing or expansion state is unknown.

Instructions

Load entire node tree via API (bypass WebSocket). Use when Key Files doesn't have parent UUID for ETCH, or when Dan wants complete tree regardless of expansion state.

Input Schema

| Name | Required | Description | Default |
| ----------- | -------- | ---------------------------------------------------- | ------- |
| node_id | Yes | Root node UUID to read from | — |
| depth | No | Maximum depth to traverse (None = full tree) | None |
| size_limit | No | Maximum number of nodes to return | 1000 |
| output_file | No | Optional file path; write the tree to disk if given | None |

Implementation Reference

  • MCP tool registration decorator for 'workflowy_scry' tool, defining its name and description.
    @mcp.tool( name="workflowy_scry", description="Load entire node tree via API (bypass WebSocket). Use when Key Files doesn't have parent UUID for ETCH, or when Dan wants complete tree regardless of expansion state." )
  • MCP-exposed handler function for workflowy_scry tool. Thin wrapper that acquires rate limit, calls client.workflowy_scry, handles result, and returns to MCP client.
    async def glimpse_full(
        node_id: str,
        depth: int | None = None,
        size_limit: int = 1000,
        output_file: str | None = None,
    ) -> dict:
        """Load entire node tree via full API fetch (bypass WebSocket).

        Thin wrapper around workflowy_glimpse that forces API fetch.

        Use when:
        - Agent needs to hunt for parent UUIDs not in Key Files
        - Dan wants complete node tree regardless of expansion
        - WebSocket selective extraction not needed

        Args:
            node_id: Root node UUID to read from
            depth: Maximum depth to traverse (1=direct children only,
                2=two levels, None=full tree)
            size_limit: Maximum number of nodes to return (default 1000,
                raises error if exceeded)
            output_file: Optional file path; when given, the client writes
                the tree to disk instead of returning it inline.

        Returns:
            Same format as workflowy_glimpse with _source="api"
        """
        client = get_client()
        if _rate_limiter:
            await _rate_limiter.acquire()
        try:
            # Call workflowy_scry on the client (bypasses WebSocket by design).
            result = await client.workflowy_scry(
                node_id,
                depth=depth,
                size_limit=size_limit,
                output_file=output_file,
            )
            if _rate_limiter:
                _rate_limiter.on_success()
            return result
        except Exception as e:
            # Fix: dropped the redundant `hasattr(e, "__class__")` guard —
            # every Python object has __class__, so it could never be False.
            # Matching by class *name* (instead of isinstance) is kept,
            # presumably to avoid importing the error type here — TODO confirm.
            if _rate_limiter and type(e).__name__ == "RateLimitError":
                # Feed the server-suggested backoff (if any) to the limiter,
                # then re-raise so the MCP layer sees the original failure.
                _rate_limiter.on_rate_limit(getattr(e, "retry_after", None))
            raise
  • Core implementation of workflowy_scry in WorkFlowyClient: fetches node tree via API (/nodes-export or traversal), builds hierarchy, handles depth/size limits with truncation semantics, annotates completeness status, generates preview tree.
    async def workflowy_scry(
        self,
        node_id: str,
        use_efficient_traversal: bool = False,
        depth: int | None = None,
        size_limit: int = 1000,
        output_file: str | None = None,
    ) -> dict[str, Any]:
        """Load node tree via API (Mode 2 - Agent hunts).

        Fetches the subtree rooted at ``node_id`` either via the bulk export
        endpoint or a breadth-first ``list_nodes`` traversal, builds a
        hierarchy, applies size/depth limits, and stamps every returned node
        with a ``children_status`` completeness marker (``complete``,
        ``truncated_by_max_nodes`` or ``truncated_by_depth``).

        Args:
            node_id: Root node UUID to read from.
            use_efficient_traversal: When True, walk the tree breadth-first
                via ``list_nodes`` instead of the bulk export endpoint.
            depth: Maximum depth to keep (None = full tree).
            size_limit: Maximum node count; larger trees are truncated and
                marked ``truncated_by_max_nodes``.
            output_file: When given, delegate to ``bulk_export_to_file_impl``
                and return file-mode metadata instead of an in-memory tree.

        Returns:
            Dict with root metadata, children, node/depth counts, a preview
            tree and completeness annotations; always has ``_source="api"``.

        Raises:
            NetworkError: Only if truncation is disabled and the subtree
                exceeds ``size_limit`` (currently unreachable — see note).
        """
        # NOTE(review): `logger` is never used below; kept in case
        # _ClientLogger() has construction side effects — TODO confirm
        # and delete if it does not.
        logger = _ClientLogger()

        # Delegate to bulk_export if output_file requested.
        if output_file is not None:
            result = await bulk_export_to_file_impl(
                self,
                node_id=node_id,
                output_file=output_file,
                include_metadata=True,
                use_efficient_traversal=use_efficient_traversal,
                max_depth=depth,
                child_count_limit=None,
                max_nodes=size_limit,
            )
            return {
                "success": True,
                "mode": "file",
                "terrain_file": output_file,
                "markdown_file": result.get("markdown_file"),
                "node_count": result.get("node_count"),
                "depth": result.get("depth"),
                "_source": "api",
            }

        # In-memory return: collect a flat node list first.
        if use_efficient_traversal:
            from collections import deque

            from ..models import NodeListRequest

            flat_nodes = []
            queue = deque([node_id])
            visited = set()
            while queue:
                parent = queue.popleft()
                if parent in visited:
                    continue
                visited.add(parent)
                request = NodeListRequest(parentId=parent)
                # `_count` was unused in the original; renamed to signal that.
                child_nodes, _count = await self.list_nodes(request)
                for child in child_nodes:
                    child_dict = child.model_dump()
                    # Backfill parent linkage when the API omits it, so
                    # _build_hierarchy can attach the node correctly.
                    parent_id_val = child_dict.get("parent_id") or child_dict.get("parentId")
                    if not parent_id_val:
                        child_dict["parent_id"] = parent
                    flat_nodes.append(child_dict)
                    queue.append(child.id)
            # Root is fetched separately and prepended so the flat list is
            # root-first, matching the export path's shape.
            root_node_data = await self.get_node(node_id)
            flat_nodes.insert(0, root_node_data.model_dump())
        else:
            raw_data = await export_nodes_impl(self, node_id)
            flat_nodes = raw_data.get("nodes", [])

        if not flat_nodes:
            # Empty subtree: return a well-formed empty result, not an error.
            return {
                "success": True,
                "root": None,
                "children": [],
                "node_count": 0,
                "depth": 0,
                "_source": "api",
            }

        # Size limit handling.
        # Ground-truth semantics: TRUNCATE oversized trees instead of erroring,
        # and mark nodes with children_status='truncated_by_max_nodes' so
        # exploration sessions can ingest partial trees while preserving
        # epistemic completeness metadata.
        # NOTE(review): the flag below is hard-coded True, so the NetworkError
        # branch is currently unreachable. The original comment claimed the
        # "default remains SAFE" (raise), which contradicted the code; the
        # actual default is truncation. Promote the flag to a parameter if
        # callers need the erroring behavior back.
        allow_truncate_by_max_nodes = True
        did_truncate_by_max_nodes = False
        if size_limit and len(flat_nodes) > size_limit:
            if not allow_truncate_by_max_nodes:
                raise NetworkError(
                    f"Tree size ({len(flat_nodes)}) exceeds limit ({size_limit}).\n\n"
                    f"Options:\n"
                    f"1. Increase size_limit\n"
                    f"2. Use depth parameter\n"
                    f"3. Use GLIMPSE (WebSocket)"
                )
            did_truncate_by_max_nodes = True
            # Keep root + the first (size_limit - 1) nodes (conservative
            # truncation). flat_nodes is a pre-order-ish list from
            # /nodes-export filtered to the subtree; this cut is intentionally
            # coarse and downstream logic must treat the result as incomplete.
            flat_nodes = flat_nodes[: max(1, size_limit)]

        # Build hierarchy from the (possibly truncated) flat list.
        hierarchical_tree = self._build_hierarchy(flat_nodes, True)

        # Completeness semantics: the API path returns a full subtree (subject
        # only to explicit size_limit/depth), so every node we actually hold
        # starts as 'complete'. Depth-limited nodes are re-stamped below.
        def _stamp_children_status_complete(nodes: list[dict[str, Any]]) -> None:
            for n in nodes or []:
                if not isinstance(n, dict):
                    continue
                n["children_status"] = "complete"
                _stamp_children_status_complete(n.get("children") or [])

        _stamp_children_status_complete(hierarchical_tree)

        # If the flat export was truncated by size_limit, the whole returned
        # tree is incomplete: mark every node fail-closed.
        if did_truncate_by_max_nodes:
            def _stamp_children_status_truncated(nodes: list[dict[str, Any]]) -> None:
                for n in nodes or []:
                    if not isinstance(n, dict):
                        continue
                    n["children_status"] = "truncated_by_max_nodes"
                    _stamp_children_status_truncated(n.get("children") or [])

            _stamp_children_status_truncated(hierarchical_tree)

        # Attach preview; preview generation failures are non-fatal.
        try:
            preview_tree = self._annotate_preview_ids_and_build_tree(hierarchical_tree, "WS")
        except Exception:
            preview_tree = []

        # Extract root metadata and its children from the built hierarchy.
        root_metadata = None
        children = []
        root_children_status = "complete"
        if hierarchical_tree and len(hierarchical_tree) == 1:
            # Single root: the common case.
            root_node = hierarchical_tree[0]
            root_metadata = {
                "id": root_node.get("id"),
                "name": root_node.get("name"),
                "note": root_node.get("note"),
                "parent_id": root_node.get("parent_id"),
            }
            children = root_node.get("children", [])
            root_children_status = root_node.get("children_status", "complete")
        else:
            # Multiple top-level entries: locate the requested root by id.
            target_root = next((r for r in hierarchical_tree if r.get("id") == node_id), None)
            if target_root:
                root_metadata = {
                    "id": target_root.get("id"),
                    "name": target_root.get("name"),
                    "note": target_root.get("note"),
                    "parent_id": target_root.get("parent_id"),
                }
                children = target_root.get("children", [])
                root_children_status = target_root.get("children_status", "complete")
            else:
                # Root not found among the forest roots; return everything and
                # admit we cannot vouch for completeness.
                children = hierarchical_tree
                root_children_status = "unknown"

        # Apply depth limit and re-stamp nodes cut off at the boundary.
        # Fix: removed the dead `_mark_truncated_by_depth` helper — it was
        # never called and its cutoff branch was a bare `pass` (the original
        # comments acknowledged it was superseded by the id-set approach below).
        if depth is not None:
            children = self._limit_depth(children, depth)

            # Any node at the cutoff depth that had children in the original
            # hierarchy was truncated structurally and must not stay 'complete'.
            # NOTE(review): this assumes `hierarchical_tree` still reflects the
            # pre-limit structure, i.e. that _limit_depth does not mutate nodes
            # in place — TODO confirm against _limit_depth's implementation.
            ids_with_children: set[str] = set()
            try:
                def _collect_ids_with_children(nodes0: list[dict[str, Any]]) -> None:
                    for nn in nodes0 or []:
                        if not isinstance(nn, dict):
                            continue
                        cid = nn.get("id")
                        ch0 = nn.get("children") or []
                        if cid and isinstance(ch0, list) and len(ch0) > 0:
                            ids_with_children.add(str(cid))
                            _collect_ids_with_children(ch0)

                _collect_ids_with_children(hierarchical_tree)
            except Exception:
                ids_with_children = set()

            def _stamp_depth_status(nodes1: list[dict[str, Any]], current_depth: int = 1) -> None:
                for nn in nodes1 or []:
                    if not isinstance(nn, dict):
                        continue
                    nid = nn.get("id")
                    if current_depth >= depth:
                        if nid and str(nid) in ids_with_children:
                            nn["children_status"] = "truncated_by_depth"
                    _stamp_depth_status(nn.get("children") or [], current_depth + 1)

            _stamp_depth_status(children, current_depth=1)

        max_depth = self._calculate_max_depth(children)
        result = {
            "success": True,
            "_source": "api",
            "node_count": len(flat_nodes),
            "depth": max_depth,
            "preview_tree": preview_tree,
            "root": root_metadata,
            "children": children,
            "export_root_children_status": root_children_status,
        }
        _log_glimpse_to_file("glimpse_full", node_id, result)
        return result
  • Docstring reference to workflowy_scry as provider of api_glimpse data used in GLIMPSE merging.
    api_glimpse: Optional API GLIMPSE dict from ``workflowy_scry``.

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/daniel347x/workflowy-mcp-fixed'

If you have feedback or need assistance with the MCP directory API, please join our Discord server