#!/usr/bin/env node
// Scrapbox MCP Server
/**
* Simple test script for Scrapbox MCP Server
*
* This script tests the Scrapbox MCP Server by directly calling the functions
* that would be called by the MCP server when handling requests.
*/
import axios from 'axios';
// Test URLs
const validUrl = 'https://scrapbox.io/razokulover-tech-memo/%E3%82%B3%E3%83%AB%E3%83%96%E3%81%AE%E7%B5%8C%E9%A8%93%E5%AD%A6%E7%BF%92%E3%83%A2%E3%83%87%E3%83%AB';
const invalidUrl = 'https://example.com/not-scrapbox';
const nonExistentUrl = 'https://scrapbox.io/razokulover-tech-memo/this-page-does-not-exist-12345';
/**
* Extracts project name and page title from a Scrapbox URL
*/
/**
 * Extracts the project name and page title from a Scrapbox URL.
 *
 * @param {string} url - A Scrapbox page URL, e.g. `https://scrapbox.io/<project>/<title>`.
 * @returns {{projectName: string, pageTitle: string}} The project name and the
 *   percent-decoded page title.
 * @throws {TypeError} If `url` is not a parseable URL.
 * @throws {Error} If the host is not scrapbox.io, or the path lacks a
 *   project name or page title.
 */
function extractScrapboxInfo(url) {
  console.log("[URL] Processing URL:", url);
  try {
    const urlObj = new URL(url);
    // Validate that this is a Scrapbox URL
    if (urlObj.hostname !== "scrapbox.io") {
      throw new Error("Invalid URL: Not a Scrapbox URL");
    }
    // Extract project name and page title from path
    const pathParts = urlObj.pathname.split("/").filter((part) => part);
    if (pathParts.length < 2) {
      throw new Error("Invalid URL format: Missing project name or page title");
    }
    const projectName = pathParts[0];
    // Join all remaining segments: Scrapbox page titles may contain "/",
    // which shows up as additional path parts. Using only pathParts[1]
    // would silently truncate such titles.
    const pageTitle = decodeURIComponent(pathParts.slice(1).join("/"));
    console.log("[URL] Extracted project:", projectName, "page:", pageTitle);
    return { projectName, pageTitle };
  } catch (error) {
    console.error("[Error] URL parsing failed:", error);
    throw error;
  }
}
/**
* Fetches page content from Scrapbox API
*/
/**
 * Fetches a page from the Scrapbox API and formats it as Markdown.
 *
 * @param {string} projectName - Scrapbox project name (URL path segment).
 * @param {string} pageTitle - Decoded page title; re-encoded for the request.
 * @returns {Promise<string>} Markdown with title, description, body and metadata.
 * @throws {Error} A descriptive "Page not found" error on HTTP 404; the
 *   original axios error for any other failure.
 */
async function fetchScrapboxPage(projectName, pageTitle) {
  const apiUrl = `https://scrapbox.io/api/pages/${encodeURIComponent(projectName)}/${encodeURIComponent(pageTitle)}`;
  console.log("[API] Request to endpoint:", apiUrl);
  try {
    const response = await axios.get(apiUrl);
    const page = response.data;
    // Format the page content
    let formattedContent = `# ${page.title}\n\n`;
    // Add descriptions if available
    if (page.descriptions?.length > 0) {
      formattedContent += "## 概要\n";
      formattedContent += page.descriptions.join("\n") + "\n\n";
    }
    // Add content from lines; guard against a response without `lines`
    // so a malformed payload yields empty content instead of a TypeError.
    const bodyLines = page.lines ?? [];
    formattedContent += "## 内容\n";
    formattedContent += bodyLines
      .slice(1) // Skip the first line (title)
      .map((line) => line.text)
      .join("\n");
    // Add metadata
    formattedContent += "\n\n## メタデータ\n";
    formattedContent += `- 作成日時: ${new Date(page.created).toISOString()}\n`;
    formattedContent += `- 更新日時: ${new Date(page.updated).toISOString()}\n`;
    formattedContent += `- 閲覧数: ${page.views}\n`;
    formattedContent += `- リンク数: ${page.linked}\n`;
    return formattedContent;
  } catch (error) {
    // Give missing pages a clear, actionable message instead of the raw
    // axios error; preserve the original error via `cause`.
    if (error.response?.status === 404) {
      console.error("[Error] Page not found:", apiUrl);
      throw new Error(`Page not found: ${projectName}/${pageTitle}`, { cause: error });
    }
    console.error("[Error] API request failed:", error);
    throw error;
  }
}
/**
* Test function that simulates the get_page_content tool
*/
/**
 * Simulates the `get_page_content` tool for one URL: parses it, fetches the
 * page, and prints a short preview or the error message.
 *
 * @param {string} url - The URL to exercise.
 * @returns {Promise<boolean>} `true` on success, `false` on any failure.
 */
async function testGetPageContent(url) {
  try {
    console.log(`\n===== Testing URL: ${url} =====`);
    const info = extractScrapboxInfo(url);
    const content = await fetchScrapboxPage(info.projectName, info.pageTitle);
    console.log("Status: SUCCESS");
    console.log("Content preview:");
    // Show at most the first 200 characters, with an ellipsis when truncated.
    let preview = content.substring(0, 200);
    if (content.length > 200) {
      preview += "...";
    }
    console.log(preview);
    return true;
  } catch (err) {
    console.log("Status: ERROR");
    console.log("Error:", err.message);
    return false;
  }
}
/**
* Run all tests
*/
/**
 * Runs all three test scenarios (valid URL, invalid URL, non-existent page)
 * and reports PASSED/FAILED for each. Output is identical to running the
 * scenarios one by one.
 */
async function runTests() {
  console.log("Running tests...\n");
  // Table-driven: each case records its label and whether fetching
  // the URL is expected to succeed.
  const cases = [
    { label: "Test 1: Valid URL", url: validUrl, expectSuccess: true },
    { label: "\nTest 2: Invalid URL", url: invalidUrl, expectSuccess: false },
    { label: "\nTest 3: Non-existent page", url: nonExistentUrl, expectSuccess: false },
  ];
  for (const { label, url, expectSuccess } of cases) {
    console.log(label);
    const succeeded = await testGetPageContent(url);
    console.log(`Test ${succeeded === expectSuccess ? "PASSED" : "FAILED"}`);
  }
  console.log("\nAll tests completed.");
}
// Run the tests
// Entry point: run the suite; any unhandled rejection is reported
// and mapped to a non-zero exit code.
runTests().catch((err) => {
  console.error("Test error:", err);
  process.exit(1);
});