
Aidderall MCP Server

by cheezcake
test_improved_feedback.py (3.29 kB)
#!/usr/bin/env python3
# Aidderall MCP Server - Hierarchical task management for AI assistants
# Copyright (C) 2024 Briam R. <briamr@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
"""
Manual test to demonstrate the improved feedback in Aidderall.

Run with: python tests/manual/test_improved_feedback.py
"""

import asyncio
import json

from src.handlers import AidderallHandlers
from src.task_manager import TaskManager


async def pretty_print(response):
    """Pretty print a response with proper formatting."""
    print(json.dumps(response, indent=2))
    print("-" * 80)
    print()


async def main():
    # Initialize
    task_manager = TaskManager()
    handlers = AidderallHandlers(task_manager)

    print("=== Aidderall Improved Feedback Demo ===\n")

    # 1. Create an independent task
    print("1. Creating an independent task:")
    response = await handlers.handle_create_new_task(
        "Plan Monitoring System", "Design a comprehensive monitoring solution"
    )
    await pretty_print(response)

    # 2. Extend with a blocking subtask
    print("2. Adding a blocking subtask (extend):")
    response = await handlers.handle_extend_current_task(
        "Define KPIs", "Determine key performance indicators to track"
    )
    await pretty_print(response)

    # 3. Extend again to show commitment stacking
    print("3. Adding another blocking subtask:")
    response = await handlers.handle_extend_current_task(
        "Design Database Schema", "Create schema for storing metrics"
    )
    await pretty_print(response)

    # 4. Extend once more to trigger depth warning
    print("4. Adding a third level (should trigger warning):")
    response = await handlers.handle_extend_current_task(
        "Create Tables", "SQL statements for table creation"
    )
    await pretty_print(response)

    # 5. Check current task with commitment info
    print("5. Getting current task status:")
    response = await handlers.handle_get_current_task()
    await pretty_print(response)

    # 6. Complete a task to show remaining commitments
    print("6. Completing current task:")
    response = await handlers.handle_complete_current_task()
    await pretty_print(response)

    # 7. Create a new independent task (interruption)
    print("7. Creating an interruption (new independent task):")
    response = await handlers.handle_create_new_task(
        "Fix Critical Bug", "Production issue needs immediate attention"
    )
    await pretty_print(response)

    # 8. Show the big picture
    print("8. Getting the big picture:")
    response = await handlers.handle_get_big_picture()
    await pretty_print(response)


if __name__ == "__main__":
    asyncio.run(main())
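
The script above is a manual demo meant to be read alongside its output. The sketch below is not part of the repository; it shows how the same handlers could be exercised as a small automated check instead. It assumes only what the demo already implies, namely that every handler response is a JSON-serializable dict, since pretty_print passes each one to json.dumps.

# Hypothetical automated check built on the same handlers as the demo above.
# Assumption (not stated in the repository): handler responses are
# JSON-serializable dicts, as implied by the demo's use of json.dumps;
# nothing else about the response schema is assumed.
import asyncio
import json

from src.handlers import AidderallHandlers
from src.task_manager import TaskManager


async def check_extend_response_is_serializable():
    handlers = AidderallHandlers(TaskManager())
    await handlers.handle_create_new_task(
        "Plan Monitoring System", "Design a comprehensive monitoring solution"
    )
    response = await handlers.handle_extend_current_task(
        "Define KPIs", "Determine key performance indicators to track"
    )
    # Each response should at minimum round-trip through JSON unchanged.
    assert json.loads(json.dumps(response)) == response


if __name__ == "__main__":
    asyncio.run(check_extend_response_is_serializable())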

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/cheezcake/aidderall_mcp'
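
The same endpoint can be queried from Python. Below is a minimal sketch, assuming only that the endpoint returns a JSON body; the response schema is not documented on this page, so the result is simply pretty-printed.

# Minimal sketch: fetch the Aidderall entry from the Glama MCP directory API.
# Assumption: the endpoint responds with JSON; its schema is not shown here,
# so the result is printed as-is rather than parsed into specific fields.
import json
from urllib.request import urlopen

URL = "https://glama.ai/api/mcp/v1/servers/cheezcake/aidderall_mcp"

with urlopen(URL) as response:
    data = json.load(response)

print(json.dumps(data, indent=2))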

If you have feedback or need assistance with the MCP directory API, please join our Discord server.