
MCP Agent Tracker

by Big0290
test_goals_flow_fix.py • 4.35 kB
#!/usr/bin/env python3
"""
🧪 Test Script: Verify Goals Flow Fix for Technical Questions
"""


def test_goals_flow_fix():
    """Test if technical questions now include conversation context"""
    print("🧪 TESTING GOALS FLOW FIX FOR TECHNICAL QUESTIONS")
    print("=" * 70)

    try:
        # Test 1: Import optimized prompt generator
        print("1️⃣ Testing optimized prompt generator import...")
        from optimized_prompt_generator import OptimizedPromptGenerator

        generator = OptimizedPromptGenerator()
        print("✅ OptimizedPromptGenerator imported successfully")

        # Test 2: Create test context
        print("\n2️⃣ Testing with technical question...")
        from prompt_generator import PromptContext

        test_context = PromptContext(
            conversation_summary="Test conversation summary",
            action_history="Test action history",
            tech_stack="Test tech stack",
            project_plans=(
                "🎯 PROJECT PLANS & OBJECTIVES:\n"
                "1. Build powerful conversation tracking system ✅\n"
                "2. Implement context-aware prompt processing ✅\n"
                "3. Create intelligent memory management system ✅"
            ),
            user_preferences="Test user preferences",
            agent_metadata="Test agent metadata",
            recent_interactions=[],
            project_patterns=[],
            best_practices=[],
            common_issues=[],
            development_workflow=[],
            confidence_score=0.9,
            context_type="test",
        )

        # Test 3: Test intent classification for technical question
        print("\n3️⃣ Testing intent classification for technical question...")
        context_dict = generator._context_to_dict(test_context)

        if generator.intent_selector:
            relevant_context, intent_analysis = generator.intent_selector.select_relevant_context(
                "test goals flow and updates", context_dict
            )
            print("✅ Intent classification successful")
            print(f"🎯 Intent: {intent_analysis.primary_intent.value}")
            print(f"📋 Context requirements: {intent_analysis.context_requirements}")
            print(f"🔧 Selected context: {list(relevant_context.keys())}")
        else:
            print("⚠️ Intent selector not available")
            return

        # Test 4: Test full prompt generation
        print("\n4️⃣ Testing full prompt generation...")
        full_prompt = ""  # ensure defined for the final assessment even if generation fails
        try:
            full_prompt = generator._create_phase1_optimized_prompt(
                "test goals flow and updates", test_context, "smart"
            )
            print("✅ Full prompt generated")
            print(f"📋 Prompt length: {len(full_prompt)}")
            print(f"💬 Contains context: {'💬 CONTEXT:' in full_prompt}")
            print(f"📝 Contains recent: {'📝 RECENT:' in full_prompt}")
            print(f"🎯 Contains goals: {'🎯 GOALS:' in full_prompt}")

            # Show the relevant section
            if '💬 CONTEXT:' in full_prompt:
                context_start = full_prompt.find('💬 CONTEXT:')
                context_end = full_prompt.find('\n\n', context_start)
                if context_end == -1:
                    context_end = len(full_prompt)
                context_section = full_prompt[context_start:context_end]
                print(f"📋 Conversation context section:\n{context_section}")
            else:
                print("❌ No conversation context section found!")
        except Exception as e:
            print(f"❌ Full prompt generation failed: {e}")
            import traceback
            traceback.print_exc()

        print("\n" + "=" * 70)
        print("🧪 TEST COMPLETE")

        # Final assessment
        if '💬 CONTEXT:' in full_prompt and '🎯 GOALS:' in full_prompt:
            print("🎉 SUCCESS: Technical questions now include conversation context with goals!")
        else:
            print("❌ FAILURE: Technical questions still missing conversation context!")

    except Exception as e:
        print(f"❌ Error during testing: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    test_goals_flow_fix()
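Running python test_goals_flow_fix.py executes the whole flow and reports each stage with ✅/❌ markers rather than raising assertions. If a pytest-style check is preferred, a minimal sketch along the same lines could look like the following; it reuses the same OptimizedPromptGenerator and PromptContext APIs exercised above and assumes those modules are importable from the project root (the test name and wording below are illustrative, not part of the repository).

# Hypothetical pytest-style variant of the final assessment in the script above.
from optimized_prompt_generator import OptimizedPromptGenerator
from prompt_generator import PromptContext


def test_technical_prompt_contains_context_and_goals():
    generator = OptimizedPromptGenerator()
    context = PromptContext(
        conversation_summary="Test conversation summary",
        action_history="Test action history",
        tech_stack="Test tech stack",
        project_plans="🎯 PROJECT PLANS & OBJECTIVES:\n1. Build powerful conversation tracking system ✅",
        user_preferences="Test user preferences",
        agent_metadata="Test agent metadata",
        recent_interactions=[],
        project_patterns=[],
        best_practices=[],
        common_issues=[],
        development_workflow=[],
        confidence_score=0.9,
        context_type="test",
    )
    prompt = generator._create_phase1_optimized_prompt(
        "test goals flow and updates", context, "smart"
    )
    # The fix is considered in place when both sections survive optimization.
    assert "💬 CONTEXT:" in prompt
    assert "🎯 GOALS:" in prompt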

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Big0290/MCP'
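For scripted access, the same endpoint can be queried from Python. This is a minimal sketch assuming the endpoint is publicly reachable, returns JSON, and that the requests package is installed; the response schema is not documented here, so the example simply prints whatever is returned.

import requests

# Fetch this server's MCP directory entry and dump the raw JSON response.
url = "https://glama.ai/api/mcp/v1/servers/Big0290/MCP"
response = requests.get(url, timeout=10)
response.raise_for_status()
print(response.json())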

If you have feedback or need assistance with the MCP directory API, please join our Discord server.