MCP Self-Learning Server

test-python-simple.py
#!/usr/bin/env python3 """ Simple test script for Python Client Library (Claudia Integration) """ import sys import asyncio import json from pathlib import Path # Add lib directory to path sys.path.insert(0, str(Path(__file__).parent / 'lib')) from self_learning_client import SelfLearningClient async def test_python_client(): print("šŸ Testing Python Client Library...\n") # Initialize client client = SelfLearningClient(base_url="http://localhost:8765") try: # Test connection print("1. Testing connection...") connected = await client.connect() print(f" āœ… Connected: {connected}\n") # Test health check print("2. Testing health check...") health = await client.get_health() print(f" āœ… Health: {health['status']}") print(f" šŸ“Š Uptime: {round(health['uptime'] / 1000)}s\n") # Test status print("3. Testing status...") status = await client.get_status() print(f" āœ… Running: {status.running}") print(f" 🧠 Learning Active: {status.learning_status}") print(f" šŸ“Š Patterns: {status.patterns_count}\n") # Test voice interaction analysis print("4. Testing voice interaction analysis...") user_input = "What is the weather like today?" assistant_output = "Today will be sunny with a high of 75°F" context = { 'intent': 'weather_query', 'confidence': 0.95, 'location': 'test_location' } result = await client.analyze_voice_interaction(user_input, assistant_output, context, True) print(f" āœ… Pattern ID: {result['patternId'][:8]}...") print(f" šŸŽÆ Tool Sequence: {result['features']['toolSequence']}") print(f" šŸ“Š Context Intent: {context['intent']}\n") # Test pattern analysis (general method) print("5. Testing pattern analysis...") interaction = { 'type': 'voice_interaction', 'input': user_input, 'output': assistant_output, 'success': True, 'context': context } pattern_result = await client.analyze_pattern(interaction) print(f" āœ… Pattern ID: {pattern_result['patternId'][:8]}...") print(f" šŸ”§ Features extracted: {len(pattern_result['features'])} categories\n") # Test insights print("6. Testing insights...") insights = await client.get_insights() print(f" āœ… Total interactions: {insights.performance_metrics.get('totalInteractions', 0)}") print(f" 🧠 Knowledge items: {len(insights.top_patterns)}") print(f" šŸ”„ Learning cycles: {insights.learning_cycles}\n") # Test optimizations print("7. Testing optimizations...") optimizations = await client.get_optimizations() print(f" āœ… Optimization suggestions available: {optimizations['success']}") if optimizations.get('suggestions'): print(f" šŸ’” Found {len(optimizations['suggestions'])} suggestions") print() # Test predictions print("8. Testing predictions...") prediction_context = { 'current_intent': 'weather_query', 'user_history': ['greeting', 'weather_query'], 'time_of_day': 'morning' } prediction = await client.predict_next_action(prediction_context) print(f" āœ… Predictions available: {prediction['success']}") if prediction.get('predictions'): print(f" šŸ”® Top prediction: {prediction['predictions'][0]['action']}") print() # Test performance metrics print("9. 
Testing performance metrics...") metrics = await client.get_performance_metrics() print(f" āœ… Metrics available: {metrics['success']}") if metrics.get('metrics'): print(f" šŸ“ˆ Response time avg: {metrics['metrics'].get('responseTime', 'N/A')}") print() print("āœ… All Python client tests passed!\n") except Exception as error: print(f"āŒ Python client test failed: {error}") import traceback traceback.print_exc() sys.exit(1) finally: await client.disconnect() if __name__ == "__main__": asyncio.run(test_python_client())

MCP directory API

We provide all the information about MCP servers via our MCP directory API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/saralegui-solutions/mcp-self-learning-server'
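The same GET request can be issued from Python with only the standard library. This is an illustrative sketch; the structure of the JSON response is whatever the Glama MCP directory API returns and is not assumed here.

# Fetch the directory entry for this server and pretty-print the JSON response.
import json
import urllib.request

url = "https://glama.ai/api/mcp/v1/servers/saralegui-solutions/mcp-self-learning-server"
with urllib.request.urlopen(url) as resp:
    server_info = json.loads(resp.read().decode("utf-8"))

print(json.dumps(server_info, indent=2))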

If you have feedback or need assistance with the MCP directory API, please join our Discord server.