Aidderall MCP Server

by cheezcake
test_server.py
#!/usr/bin/env python3
# Aidderall MCP Server - Hierarchical task management for AI assistants
# Copyright (C) 2024 Briam R. <briamr@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Quick test script to verify the Aidderall MCP server is working.

Run the server in one terminal:

    python -m src.server

Then run this script in another terminal to test it.
"""
import asyncio
import json

from src.handlers import AidderallHandlers
from src.task_manager import TaskManager


async def test_aidderall():
    print("Testing Aidderall MCP Server locally...\n")

    # Create instances
    task_manager = TaskManager()
    handlers = AidderallHandlers(task_manager)

    # Test 1: Create a new task
    print("1. Creating a new task...")
    result = await handlers.handle_create_new_task(
        "Design Feature", "Design a new user feature"
    )
    print(f" Result: {result}\n")

    # Test 2: Get current task
    print("2. Getting current task...")
    result = await handlers.handle_get_current_task()
    print(f" Current task: {result['title']}\n")

    # Test 3: Extend with subtask
    print("3. Extending current task...")
    result = await handlers.handle_extend_current_task(
        "Research Requirements", "Gather user requirements"
    )
    print(f" Result: {result}\n")

    # Test 4: Get big picture
    print("4. Getting big picture...")
    result = await handlers.handle_get_big_picture()
    print(" Structure:")
    print(f" {result['structure']}\n")

    # Test 5: Add another subtask
    print("5. Adding another subtask...")
    result = await handlers.handle_extend_current_task(
        "Interview Users", "Conduct user interviews"
    )
    print(f" Result: {result}\n")

    # Test 6: Complete current task
    print("6. Completing current task...")
    result = await handlers.handle_complete_current_task()
    print(f" Completed: {result['completed_task_title']}")
    print(f" New current: {result['new_current_task_title']}\n")

    # Test 7: Get completed tasks
    print("7. Getting completed tasks...")
    result = await handlers.handle_get_completed_tasks()
    print(f" Completed count: {result['count']}")
    for task in result["tasks"]:
        print(f" - {task['title']}\n")

    # Test 8: Get stack overview
    print("8. Getting stack overview...")
    result = await handlers.handle_get_stack_overview()
    print(f" Stack depth: {result['stack_depth']}")
    print(f" Zen state: {result['zen_state']}")
    print(f" Completed count: {result['completed_count']}\n")

    print("All tests completed successfully!")


if __name__ == "__main__":
    asyncio.run(test_aidderall())
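
The walkthrough above prints results rather than asserting on them. As a minimal sketch of how the same checks could run under a test runner (assuming pytest and the pytest-asyncio plugin are installed; only the handler methods and result keys exercised above are used):

import pytest

from src.handlers import AidderallHandlers
from src.task_manager import TaskManager


@pytest.mark.asyncio
async def test_create_and_inspect_task():
    handlers = AidderallHandlers(TaskManager())

    # Mirrors tests 1-2 above: the newly created task is assumed to become
    # the current task, as the walkthrough implies.
    await handlers.handle_create_new_task(
        "Design Feature", "Design a new user feature"
    )
    current = await handlers.handle_get_current_task()
    assert current["title"] == "Design Feature"

    # Mirrors test 8 above: the overview should expose the keys the script prints.
    overview = await handlers.handle_get_stack_overview()
    assert {"stack_depth", "zen_state", "completed_count"} <= overview.keys()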

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/cheezcake/aidderall_mcp'
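
The same lookup can be done from Python with only the standard library; the sketch below assumes the endpoint returns a JSON document and requires no authentication:

import json
import urllib.request

# Endpoint taken from the curl example above.
URL = "https://glama.ai/api/mcp/v1/servers/cheezcake/aidderall_mcp"

with urllib.request.urlopen(URL) as response:
    server_info = json.load(response)  # assumes a JSON response body

print(server_info)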

If you have feedback or need assistance with the MCP directory API, please join our Discord server.