# scan_text_nodes
Extract and analyze all text nodes within a selected Figma design element to enable AI-assisted editing and content management through natural language commands.
## Instructions

Scan all text nodes in the selected Figma node.
## Input Schema

| Name | Required | Description | Default |
|---|---|---|---|
| `nodeId` | Yes | ID of the node to scan | — |
## Input Schema (JSON Schema)

```json
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "additionalProperties": false,
  "properties": {
    "nodeId": {
      "type": "string",
      "description": "ID of the node to scan"
    }
  },
  "required": [
    "nodeId"
  ],
  "type": "object"
}
```
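With the schema above, a client only needs to supply the ID of the node to scan. The snippet below is a minimal sketch of calling the tool from a TypeScript MCP client; the transport command, server path, and the node ID `"123:456"` are placeholders, not values taken from this project.

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Connect to the MCP server (command and args are placeholders; adjust to your setup).
const client = new Client({ name: "example-client", version: "1.0.0" });
await client.connect(
  new StdioClientTransport({ command: "node", args: ["path/to/server.js"] })
);

// "123:456" is a made-up Figma node ID.
const result = await client.callTool({
  name: "scan_text_nodes",
  arguments: { nodeId: "123:456" },
});
console.log(result.content);
```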
## Implementation Reference
- MCP server tool registration and handler for `scan_text_nodes`. Defines the input schema (`nodeId`), the tool description, and a handler that forwards the command to the Figma plugin over WebSocket with chunking enabled.

```typescript
server.tool(
  "scan_text_nodes",
  "Scan all text nodes in the selected Figma node",
  {
    nodeId: z.string().describe("ID of the node to scan"),
  },
  async ({ nodeId }) => {
    try {
      // Initial response to indicate we're starting the process
      const initialStatus = {
        type: "text" as const,
        text: "Starting text node scanning. This may take a moment for large designs...",
      };

      // Use the plugin's scan_text_nodes function with chunking flag
      const result = await sendCommandToFigma("scan_text_nodes", {
        nodeId,
        useChunking: true, // Enable chunking on the plugin side
        chunkSize: 10      // Process 10 nodes at a time
      });

      // If the result indicates chunking was used, format the response accordingly
      if (result && typeof result === 'object' && 'chunks' in result) {
        const typedResult = result as {
          success: boolean,
          totalNodes: number,
          processedNodes: number,
          chunks: number,
          textNodes: Array<any>
        };

        const summaryText = `
        Scan completed:
        - Found ${typedResult.totalNodes} text nodes
        - Processed in ${typedResult.chunks} chunks
        `;

        return {
          content: [
            initialStatus,
            {
              type: "text" as const,
              text: summaryText
            },
            {
              type: "text" as const,
              text: JSON.stringify(typedResult.textNodes, null, 2)
            }
          ],
        };
      }

      // If chunking wasn't used or wasn't reported in the result format, return the result as is
      return {
        content: [
          initialStatus,
          {
            type: "text",
            text: JSON.stringify(result, null, 2),
          },
        ],
      };
    } catch (error) {
      return {
        content: [
          {
            type: "text",
            text: `Error scanning text nodes: ${error instanceof Error ? error.message : String(error)}`,
          },
        ],
      };
    }
  }
);
```
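The handler delegates the actual scan to the plugin through `sendCommandToFigma`, which is not reproduced on this page. The sketch below illustrates, under stated assumptions, how such a WebSocket request/response bridge can be structured; the port, message shape, and function name are invented for illustration and do not reflect this project's actual implementation.

```typescript
// Hypothetical WebSocket bridge in the style of sendCommandToFigma (illustration only).
// The port, message format, and correlation scheme are assumptions.
import { randomUUID } from "node:crypto";
import WebSocket from "ws";

const socket = new WebSocket("ws://localhost:3055"); // assumed relay port
const pending = new Map<string, (value: unknown) => void>();

socket.on("message", (data) => {
  const msg = JSON.parse(data.toString());
  const resolve = pending.get(msg.id);
  if (resolve) {
    pending.delete(msg.id);
    resolve(msg.result); // progress updates could also be surfaced here
  }
});

function sendCommandSketch(command: string, params: Record<string, unknown>): Promise<unknown> {
  return new Promise((resolve) => {
    const id = randomUUID(); // correlate the reply with this request
    pending.set(id, resolve);
    socket.send(JSON.stringify({ id, command, params }));
  });
}
```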
- `src/claude_mcp_plugin/code.js:1332-1553` (handler): Core Figma plugin implementation of the `scan_text_nodes` command. Handles recursive traversal of the node subtree (with optional chunking for performance), extracts text node details, sends progress updates, and briefly highlights nodes for visual feedback.

```javascript
async function scanTextNodes(params) {
  console.log(`Starting to scan text nodes from node ID: ${params.nodeId}`);
  const {
    nodeId,
    useChunking = true,
    chunkSize = 10,
    commandId = generateCommandId()
  } = params || {};

  const node = await figma.getNodeByIdAsync(nodeId);

  if (!node) {
    console.error(`Node with ID ${nodeId} not found`);
    // Send error progress update
    sendProgressUpdate(
      commandId,
      'scan_text_nodes',
      'error',
      0,
      0,
      0,
      `Node with ID ${nodeId} not found`,
      { error: `Node not found: ${nodeId}` }
    );
    throw new Error(`Node with ID ${nodeId} not found`);
  }

  // If chunking is not enabled, use the original implementation
  if (!useChunking) {
    const textNodes = [];
    try {
      // Send started progress update
      sendProgressUpdate(
        commandId,
        'scan_text_nodes',
        'started',
        0,
        1, // Not known yet how many nodes there are
        0,
        `Starting scan of node "${node.name || nodeId}" without chunking`,
        null
      );

      await findTextNodes(node, [], 0, textNodes);

      // Send completed progress update
      sendProgressUpdate(
        commandId,
        'scan_text_nodes',
        'completed',
        100,
        textNodes.length,
        textNodes.length,
        `Scan complete. Found ${textNodes.length} text nodes.`,
        { textNodes }
      );

      return {
        success: true,
        message: `Scanned ${textNodes.length} text nodes.`,
        count: textNodes.length,
        textNodes: textNodes,
        commandId
      };
    } catch (error) {
      console.error("Error scanning text nodes:", error);

      // Send error progress update
      sendProgressUpdate(
        commandId,
        'scan_text_nodes',
        'error',
        0,
        0,
        0,
        `Error scanning text nodes: ${error.message}`,
        { error: error.message }
      );

      throw new Error(`Error scanning text nodes: ${error.message}`);
    }
  }

  // Chunked implementation
  console.log(`Using chunked scanning with chunk size: ${chunkSize}`);

  // First, collect all nodes to process (without processing them yet)
  const nodesToProcess = [];

  // Send started progress update
  sendProgressUpdate(
    commandId,
    'scan_text_nodes',
    'started',
    0,
    0, // Not known yet how many nodes there are
    0,
    `Starting chunked scan of node "${node.name || nodeId}"`,
    { chunkSize }
  );

  await collectNodesToProcess(node, [], 0, nodesToProcess);

  const totalNodes = nodesToProcess.length;
  console.log(`Found ${totalNodes} total nodes to process`);

  // Calculate number of chunks needed
  const totalChunks = Math.ceil(totalNodes / chunkSize);
  console.log(`Will process in ${totalChunks} chunks`);

  // Send update after node collection
  sendProgressUpdate(
    commandId,
    'scan_text_nodes',
    'in_progress',
    5, // 5% progress for collection phase
    totalNodes,
    0,
    `Found ${totalNodes} nodes to scan. Will process in ${totalChunks} chunks.`,
    { totalNodes, totalChunks, chunkSize }
  );

  // Process nodes in chunks
  const allTextNodes = [];
  let processedNodes = 0;
  let chunksProcessed = 0;

  for (let i = 0; i < totalNodes; i += chunkSize) {
    const chunkEnd = Math.min(i + chunkSize, totalNodes);
    console.log(`Processing chunk ${chunksProcessed + 1}/${totalChunks} (nodes ${i} to ${chunkEnd - 1})`);

    // Send update before processing chunk
    sendProgressUpdate(
      commandId,
      'scan_text_nodes',
      'in_progress',
      Math.round(5 + ((chunksProcessed / totalChunks) * 90)), // 5-95% for processing
      totalNodes,
      processedNodes,
      `Processing chunk ${chunksProcessed + 1}/${totalChunks}`,
      {
        currentChunk: chunksProcessed + 1,
        totalChunks,
        textNodesFound: allTextNodes.length
      }
    );

    const chunkNodes = nodesToProcess.slice(i, chunkEnd);
    const chunkTextNodes = [];

    // Process each node in this chunk
    for (const nodeInfo of chunkNodes) {
      if (nodeInfo.node.type === "TEXT") {
        try {
          const textNodeInfo = await processTextNode(nodeInfo.node, nodeInfo.parentPath, nodeInfo.depth);
          if (textNodeInfo) {
            chunkTextNodes.push(textNodeInfo);
          }
        } catch (error) {
          console.error(`Error processing text node: ${error.message}`);
          // Continue with other nodes
        }
      }

      // Brief delay to allow UI updates and prevent freezing
      await delay(5);
    }

    // Add results from this chunk
    allTextNodes.push(...chunkTextNodes);
    processedNodes += chunkNodes.length;
    chunksProcessed++;

    // Send update after processing chunk
    sendProgressUpdate(
      commandId,
      'scan_text_nodes',
      'in_progress',
      Math.round(5 + ((chunksProcessed / totalChunks) * 90)), // 5-95% for processing
      totalNodes,
      processedNodes,
      `Processed chunk ${chunksProcessed}/${totalChunks}. Found ${allTextNodes.length} text nodes so far.`,
      {
        currentChunk: chunksProcessed,
        totalChunks,
        processedNodes,
        textNodesFound: allTextNodes.length,
        chunkResult: chunkTextNodes
      }
    );

    // Small delay between chunks to prevent UI freezing
    if (i + chunkSize < totalNodes) {
      await delay(50);
    }
  }

  // Send completed progress update
  sendProgressUpdate(
    commandId,
    'scan_text_nodes',
    'completed',
    100,
    totalNodes,
    processedNodes,
    `Scan complete. Found ${allTextNodes.length} text nodes.`,
    {
      textNodes: allTextNodes,
      processedNodes,
      chunks: chunksProcessed
    }
  );

  return {
    success: true,
    message: `Chunked scan complete. Found ${allTextNodes.length} text nodes.`,
    totalNodes: allTextNodes.length,
    processedNodes: processedNodes,
    chunks: chunksProcessed,
    textNodes: allTextNodes,
    commandId
  };
}
```
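`scanTextNodes` also relies on helpers that are not reproduced here (`findTextNodes`, `sendProgressUpdate`, `delay`, `generateCommandId`). Minimal sketches of the two simplest ones follow; the plugin's actual implementations may differ.

```typescript
// Illustrative sketches only; the plugin's real helpers may look different.
function delay(ms: number): Promise<void> {
  // Yield back to the event loop so the Figma UI stays responsive between nodes/chunks.
  return new Promise((resolve) => setTimeout(resolve, ms));
}

function generateCommandId(): string {
  // Any reasonably unique string works for correlating progress updates with a scan.
  return `cmd_${Date.now()}_${Math.random().toString(36).slice(2, 10)}`;
}
```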
- Zod input schema for the `scan_text_nodes` tool: requires a `nodeId` string.

```typescript
{
  nodeId: z.string().describe("ID of the node to scan"),
},
```
- Helper function that processes individual text nodes during scanning, creating safe data structures and providing visual feedback.

```javascript
async function processTextNode(node, parentPath, depth) {
  if (node.type !== "TEXT") return null;

  try {
    // Safely extract font information
    let fontFamily = "";
    let fontStyle = "";

    if (node.fontName) {
      if (typeof node.fontName === "object") {
        if ("family" in node.fontName) fontFamily = node.fontName.family;
        if ("style" in node.fontName) fontStyle = node.fontName.style;
      }
    }

    // Create a safe representation of the text node
    const safeTextNode = {
      id: node.id,
      name: node.name || "Text",
      type: node.type,
      characters: node.characters,
      fontSize: typeof node.fontSize === "number" ? node.fontSize : 0,
      fontFamily: fontFamily,
      fontStyle: fontStyle,
      x: typeof node.x === "number" ? node.x : 0,
      y: typeof node.y === "number" ? node.y : 0,
      width: typeof node.width === "number" ? node.width : 0,
      height: typeof node.height === "number" ? node.height : 0,
      path: parentPath.join(" > "),
      depth: depth,
    };

    // Highlight the node briefly (optional visual feedback)
    try {
      const originalFills = JSON.parse(JSON.stringify(node.fills));
      node.fills = [
        {
          type: "SOLID",
          color: { r: 1, g: 0.5, b: 0 },
          opacity: 0.3,
        },
      ];

      // Brief delay for the highlight to be visible
      await delay(100);

      try {
        node.fills = originalFills;
      } catch (err) {
        console.error("Error resetting fills:", err);
      }
    } catch (highlightErr) {
      console.error("Error highlighting text node:", highlightErr);
      // Continue anyway, highlighting is just visual feedback
    }

    return safeTextNode;
  } catch (nodeErr) {
    console.error("Error processing text node:", nodeErr);
    return null;
  }
}
```
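Each entry returned by a scan has the shape assembled in `processTextNode` above. The field names below come from that code; the values are made up for illustration.

```typescript
// Example of one scanned text node entry (illustrative values only).
const exampleTextNode = {
  id: "123:789",
  name: "Heading",
  type: "TEXT",
  characters: "Welcome back",
  fontSize: 24,
  fontFamily: "Inter",
  fontStyle: "Bold",
  x: 40,
  y: 120,
  width: 320,
  height: 32,
  path: "Page 1 > Hero > Heading",
  depth: 3,
};
```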
- Helper for chunked mode: pre-collects all nodes to process before scanning in batches.

```javascript
async function collectNodesToProcess(node, parentPath = [], depth = 0, nodesToProcess = []) {
  // Skip invisible nodes
  if (node.visible === false) return;

  // Get the path to this node
  const nodePath = [...parentPath, node.name || `Unnamed ${node.type}`];

  // Add this node to the processing list
  nodesToProcess.push({
    node: node,
    parentPath: nodePath,
    depth: depth
  });

  // Recursively add children
  if ("children" in node) {
    for (const child of node.children) {
      await collectNodesToProcess(child, nodePath, depth + 1, nodesToProcess);
    }
  }
}
```