#!/usr/bin/env python3
"""
Automated development workflow for Canvas MCP.
Handles server restart, testing, and feedback collection.
"""
import subprocess
import time
import json
import sys
from pathlib import Path
class MCPDevWorkflow:
    """Automated workflow for MCP development and testing.

    Bundles the repetitive steps of a Canvas MCP development iteration:
    restarting the local server, running the automated test suite, and
    generating the markdown templates used for manual testing in
    Claude Desktop.
    """

    def __init__(self):
        # Repository root: this script is expected to live in <root>/scripts/.
        self.project_root = Path(__file__).parent.parent
        # Handle to the most recently launched server process
        # (None until restart_mcp_server() is called).
        self.server_process = None

    def restart_mcp_server(self):
        """Restart the Canvas MCP server with new changes.

        Best-effort: kills any running "canvas_mcp" process, then launches
        start_canvas_server.sh from the project root if it exists.
        """
        print("🔄 Restarting Canvas MCP server...")
        # Kill existing server if running. check=False because pkill exits
        # non-zero when no process matched, which is not an error here.
        try:
            subprocess.run(["pkill", "-f", "canvas_mcp"], check=False)
            time.sleep(2)  # give the old process time to exit
        except Exception:
            # Best-effort cleanup (e.g. pkill missing on this platform).
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            pass
        # Start new server
        server_script = self.project_root / "start_canvas_server.sh"
        if server_script.exists():
            # NOTE(review): stdout/stderr are piped but never read in this
            # file; a chatty server could eventually block on a full pipe
            # buffer — confirm whether any caller consumes
            # self.server_process output before changing to DEVNULL.
            self.server_process = subprocess.Popen(
                ["bash", str(server_script)],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            print("✅ Server restarted")
            time.sleep(3)  # Give server time to initialize
        else:
            print("❌ Server start script not found")

    def run_tests(self):
        """Run the automated test suite.

        Returns:
            The captured stdout of the test run on success, or None when
            the tests fail or the test script is missing.
        """
        print("🧪 Running automated tests...")
        test_script = self.project_root / "tests" / "test_discussion_enhancements.py"
        if not test_script.exists():
            print("❌ Test script not found")
            return None
        # Use the current interpreter (sys.executable) rather than whatever
        # "python" resolves to on PATH, so the tests run in the same
        # environment/venv as this script.
        result = subprocess.run(
            [sys.executable, str(test_script)],
            capture_output=True,
            text=True
        )
        if result.returncode == 0:
            print("✅ Tests completed successfully")
            return result.stdout
        print("❌ Tests failed")
        print(result.stderr)
        return None

    def create_feedback_template(self):
        """Create a feedback template for Claude Desktop testing.

        Writes feedback_template.md (timestamped with the current local
        time) into the project root.

        Returns:
            Path to the generated feedback_template.md file.
        """
        template = """# Canvas MCP Testing Feedback
## Test Date: {date}
## Functions Tested:
- [ ] list_discussion_entries (basic)
- [ ] list_discussion_entries (include_full_content=True)
- [ ] list_discussion_entries (include_replies=True)
- [ ] list_discussion_entries (both parameters=True)
- [ ] get_discussion_entry_details (enhanced)
- [ ] get_discussion_with_replies
## Performance Test:
**Before (Old Approach):**
- API Calls: 8 (1 + 7 individual)
- Time: _____ seconds
**After (New Approach):**
- API Calls: _____
- Time: _____ seconds
- Improvement: ____%
## Content Quality Test:
**Truncation Issue:**
- [ ] FIXED: No more "..." in content
- [ ] ISSUE: Still seeing truncated content
- [ ] OTHER: ________________
**Reply Retrieval:**
- [ ] WORKING: Replies fetch successfully
- [ ] ISSUE: Still getting 404 errors
- [ ] OTHER: ________________
## User Experience:
**Ease of Use:**
- [ ] EXCELLENT: Very intuitive
- [ ] GOOD: Some minor issues
- [ ] NEEDS WORK: Confusing or difficult
**Documentation:**
- [ ] CLEAR: Easy to understand parameters
- [ ] UNCLEAR: Need better examples
- [ ] MISSING: Need more documentation
## Specific Issues Found:
1. ________________________________
2. ________________________________
3. ________________________________
## Suggestions for Improvement:
1. ________________________________
2. ________________________________
3. ________________________________
## Overall Rating: ___/10
## Next Steps:
- [ ] Ready for production use
- [ ] Needs minor tweaks
- [ ] Requires significant changes
- [ ] Back to development
---
*Generated by Canvas MCP Dev Workflow*
""".format(date=time.strftime('%Y-%m-%d %H:%M:%S'))
        feedback_file = self.project_root / "feedback_template.md"
        # Explicit UTF-8 so the markdown writes correctly regardless of the
        # platform's locale encoding (the default can be cp1252 on Windows).
        with open(feedback_file, "w", encoding="utf-8") as f:
            f.write(template)
        print(f"📝 Feedback template created: {feedback_file}")
        return feedback_file

    def setup_claude_desktop_integration(self):
        """Set up integration with Claude Desktop for testing.

        Writes CLAUDE_DESKTOP_TESTING.md (a fixed testing guide) into the
        project root.

        Returns:
            Path to the generated CLAUDE_DESKTOP_TESTING.md file.
        """
        integration_guide = """# Claude Desktop Integration for Testing
## Quick Testing Commands:
### 1. Basic Functionality Test
```
canvas-api:list_discussion_entries
- course_identifier: "badm_350_120255_247989"
- topic_id: "854762"
```
### 2. Full Content Test (MAIN FIX)
```
canvas-api:list_discussion_entries
- course_identifier: "badm_350_120255_247989"
- topic_id: "854762"
- include_full_content: true
```
### 3. Replies Test
```
canvas-api:list_discussion_entries
- course_identifier: "badm_350_120255_247989"
- topic_id: "854762"
- include_replies: true
```
### 4. Combined Test
```
canvas-api:list_discussion_entries
- course_identifier: "badm_350_120255_247989"
- topic_id: "854762"
- include_full_content: true
- include_replies: true
```
### 5. Individual Entry Test (Enhanced)
```
canvas-api:get_discussion_entry_details
- course_identifier: "badm_350_120255_247989"
- topic_id: "854762"
- entry_id: "4170390"
- include_replies: true
```
## What to Look For:
✅ **Success Indicators:**
- No "..." truncation in content
- Complete post text visible
- Replies loading without 404 errors
- Faster performance (fewer API calls)
❌ **Issues to Report:**
- Still seeing "..." in content
- 404 errors on replies
- Slow performance
- Missing or incomplete data
- Function not available (server restart needed)
## Performance Comparison:
**Old Way:** 8 API calls for 7 posts + details
**New Way:** 1-2 API calls for same data
**Expected:** ~87% improvement in speed
"""
        guide_file = self.project_root / "CLAUDE_DESKTOP_TESTING.md"
        # Explicit UTF-8: the guide contains non-ASCII characters (✅/❌)
        # that would raise UnicodeEncodeError under some locale encodings.
        with open(guide_file, "w", encoding="utf-8") as f:
            f.write(integration_guide)
        print(f"📋 Claude Desktop testing guide created: {guide_file}")
        return guide_file

    def full_development_cycle(self):
        """Run complete development cycle: restart, test, prepare feedback."""
        print("🚀 Starting full development cycle...\n")
        # Step 1: Restart server
        self.restart_mcp_server()
        # Step 2: Run automated tests (result currently unused beyond the
        # pass/fail messages the method itself prints).
        test_results = self.run_tests()
        # Step 3: Create feedback template
        feedback_file = self.create_feedback_template()
        # Step 4: Setup Claude Desktop integration
        testing_guide = self.setup_claude_desktop_integration()
        print("\n" + "="*60)
        print("🎉 Development Cycle Complete!")
        print("="*60)
        print(f"📁 Feedback Template: {feedback_file}")
        print(f"📋 Testing Guide: {testing_guide}")
        print("\n💡 Next Steps:")
        print("1. Test functions in Claude Desktop using the testing guide")
        print("2. Fill out the feedback template with results")
        print("3. Report any issues found")
        print("4. Iterate if needed")
        print("\n🔄 To restart cycle: python scripts/dev_workflow.py")
def main():
    """Main workflow runner.

    With no CLI argument, runs the full development cycle; otherwise
    dispatches the first argument to the matching workflow step.
    """
    workflow = MCPDevWorkflow()
    if len(sys.argv) <= 1:
        # No sub-command: run the whole cycle.
        workflow.full_development_cycle()
        return
    # Map sub-command name -> workflow step.
    commands = {
        "restart": workflow.restart_mcp_server,
        "test": workflow.run_tests,
        "feedback": workflow.create_feedback_template,
        "guide": workflow.setup_claude_desktop_integration,
    }
    action = commands.get(sys.argv[1])
    if action is not None:
        action()
    else:
        print("Usage: python dev_workflow.py [restart|test|feedback|guide]")
# Run the workflow only when executed as a script (not on import).
if __name__ == "__main__":
    main()