double_check_milestone2.py • 13 kB
#!/usr/bin/env python3
"""
🔍 FULL DOUBLE CHECK - MILESTONE 2: PROMPT SYSTEM
Systematic verification of all implemented components
"""

import sys
import os
import asyncio
from pathlib import Path
from datetime import datetime, timezone

# Add the src path for imports
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

from llm_core.prompts import (
    PromptManager,
    ContextBuilder,
    PromptType,
    PromptContext,
    create_general_chat_context,
    create_business_assistant_context,
    create_email_analysis_context,
    create_document_analysis_context,
    render_prompt
)


def check_file_structure():
    """Verify the Prompt System file structure"""
    print("📁 Checking Prompt System File Structure...")

    base_path = Path(__file__).parent.parent / "src" / "llm_core" / "prompts"

    required_files = [
        "__init__.py",
        "types.py",
        "context.py",
        "manager.py",
        "templates/base.j2",
        "templates/general_chat.j2",
        "templates/business_assistant.j2",
        "templates/email_analysis.j2",
        "templates/document_analysis.j2"
    ]

    missing_files = []
    for file_path in required_files:
        full_path = base_path / file_path
        if not full_path.exists():
            missing_files.append(str(full_path))
        else:
            print(f"   ✅ {file_path}")

    if missing_files:
        print(f"   ❌ Missing files: {missing_files}")
        return False

    print("   ✅ All required files present")
    return True


def check_imports():
    """Verify that all imports work"""
    print("\n📦 Checking Imports...")

    try:
        # Core imports
        from llm_core.prompts import PromptManager
        print("   ✅ PromptManager import")

        from llm_core.prompts import ContextBuilder
        print("   ✅ ContextBuilder import")

        from llm_core.prompts import PromptType, PromptContext
        print("   ✅ Types import")

        from llm_core.prompts import (
            create_general_chat_context,
            create_business_assistant_context,
            create_email_analysis_context,
            create_document_analysis_context
        )
        print("   ✅ Context creation functions import")

        from llm_core.prompts import render_prompt
        print("   ✅ Render function import")

        # Imports from llm_core
        from llm_core import IRISClient, get_iris_client, quick_chat
        print("   ✅ IRIS Client import")

        return True

    except ImportError as e:
        print(f"   ❌ Import error: {e}")
        return False


def check_template_syntax():
    """Verify Jinja2 template syntax"""
    print("\n🎭 Checking Template Syntax...")

    try:
        manager = PromptManager()
        templates = manager.list_available_templates()

        for template_name in templates:
            try:
                # Load the template to validate its syntax
                template = manager.env.get_template(template_name)
                print(f"   ✅ {template_name} - syntax OK")
            except Exception as e:
                print(f"   ❌ {template_name} - syntax error: {e}")
                return False

        return True

    except Exception as e:
        print(f"   ❌ Template manager error: {e}")
        return False


def check_context_builders():
    """Verify ContextBuilder behaviour"""
    print("\n🔧 Checking Context Builders...")

    try:
        # Base ContextBuilder test
        builder = ContextBuilder()
        builder.with_user("test_user", "Test User")
        builder.for_prompt_type(PromptType.GENERAL_CHAT)
        builder.with_session("test_session")

        context = builder.build()

        assert context.user_profile.user_id == "test_user"
        assert context.user_profile.name == "Test User"
        assert context.prompt_type == PromptType.GENERAL_CHAT
        print("   ✅ ContextBuilder basic functionality")

        # Convenience functions
        context_general = create_general_chat_context(
            user_id="test", name="Test", session_id="session"
        )
        assert context_general.prompt_type == PromptType.GENERAL_CHAT
        print("   ✅ create_general_chat_context")

        context_business = create_business_assistant_context(
            user_id="test", name="Test", session_id="session",
            company_info={"name": "TestCorp"}
        )
        assert context_business.prompt_type == PromptType.BUSINESS_ASSISTANT
        print("   ✅ create_business_assistant_context")

        context_email = create_email_analysis_context(
            user_id="test", name="Test", session_id="session",
            email_data={"sender": "test@test.com", "content": "Test email"}
        )
        assert context_email.prompt_type == PromptType.EMAIL_ANALYSIS
        print("   ✅ create_email_analysis_context")

        context_doc = create_document_analysis_context(
            user_id="test", name="Test", session_id="session",
            documents=[{"name": "test.pdf", "type": "PDF"}]
        )
        assert context_doc.prompt_type == PromptType.DOCUMENT_ANALYSIS
        print("   ✅ create_document_analysis_context")

        return True

    except Exception as e:
        print(f"   ❌ Context builder error: {e}")
        return False


def check_prompt_rendering():
    """Verify prompt rendering"""
    print("\n🎨 Checking Prompt Rendering...")

    try:
        # Render a prompt for every prompt type
        contexts = [
            create_general_chat_context("test", "Test User", "session"),
            create_business_assistant_context(
                "test", "Test User", "session",
                company_info={"name": "TestCorp"}
            ),
            create_email_analysis_context(
                "test", "Test User", "session",
                email_data={"sender": "test@test.com", "content": "Test"}
            ),
            create_document_analysis_context(
                "test", "Test User", "session",
                documents=[{"name": "test.pdf"}]
            )
        ]

        for context in contexts:
            prompt = render_prompt(context)

            # Check that the prompt contains the essential elements
            assert "IRIS" in prompt
            assert "Test User" in prompt
            assert len(prompt) > 100  # prompt is not empty

            print(f"   ✅ {context.prompt_type.value} rendering OK")

        return True

    except Exception as e:
        print(f"   ❌ Prompt rendering error: {e}")
        return False


def check_personalization():
    """Verify prompt personalization"""
    print("\n👤 Checking Prompt Personalization...")

    try:
        # Communication style personalization
        context_formal = create_general_chat_context(
            "test", "Dr. Smith", "session",
            communication_style="formal"
        )
        context_casual = create_general_chat_context(
            "test", "Marco", "session",
            communication_style="casual"
        )

        prompt_formal = render_prompt(context_formal)
        prompt_casual = render_prompt(context_casual)

        # The prompts should differ
        assert prompt_formal != prompt_casual
        print("   ✅ Communication style personalization")

        # Expertise level personalization
        context_beginner = create_general_chat_context(
            "test", "User", "session",
            expertise_level="beginner"
        )
        context_expert = create_general_chat_context(
            "test", "User", "session",
            expertise_level="expert"
        )

        prompt_beginner = render_prompt(context_beginner)
        prompt_expert = render_prompt(context_expert)

        assert prompt_beginner != prompt_expert
        print("   ✅ Expertise level personalization")

        return True

    except Exception as e:
        print(f"   ❌ Personalization error: {e}")
        return False


def check_validation():
    """Verify input validation"""
    print("\n🛡️ Checking Input Validation...")

    try:
        # ContextBuilder validation
        builder = ContextBuilder()

        # Should fail without a user profile
        try:
            builder.build()
            print("   ❌ Should have failed without user profile")
            return False
        except ValueError:
            print("   ✅ Correctly validates missing user profile")

        # Email context validation without email data
        try:
            create_email_analysis_context(
                "test", "Test", "session"
                # email_data is missing
            )
            print("   ❌ Should have failed without email data")
            return False
        except (ValueError, TypeError):
            print("   ✅ Correctly validates missing email data")

        # Document context validation without documents
        try:
            create_document_analysis_context(
                "test", "Test", "session"
                # documents is missing
            )
            print("   ❌ Should have failed without documents")
            return False
        except (ValueError, TypeError):
            print("   ✅ Correctly validates missing documents")

        return True

    except Exception as e:
        print(f"   ❌ Validation error: {e}")
        return False


def check_dependencies():
    """Verify dependencies"""
    print("\n📚 Checking Dependencies...")

    try:
        import jinja2
        print(f"   ✅ Jinja2 version: {jinja2.__version__}")

        # Jinja2 features used by the Prompt System
        from jinja2 import Environment, FileSystemLoader
        print("   ✅ Jinja2 Environment and FileSystemLoader")

        # Custom filters
        manager = PromptManager()
        assert 'datetime' in manager.env.filters
        assert 'truncate_smart' in manager.env.filters
        assert 'filesizeformat' in manager.env.filters
        assert 'priority_emoji' in manager.env.filters
        print("   ✅ Custom Jinja2 filters")

        return True

    except ImportError as e:
        print(f"   ❌ Missing dependency: {e}")
        return False
    except Exception as e:
        print(f"   ❌ Dependency error: {e}")
        return False


async def check_integration():
    """Verify integration with the other components"""
    print("\n🔗 Checking Integration with Other Components...")

    try:
        # IRIS Client integration
        from llm_core import IRISClient, get_iris_client

        client = get_iris_client()
        print("   ✅ IRIS Client creation")

        # Health check
        health = await client.health_check()
        print(f"   ✅ Health check: {health['status']}")

        return True

    except Exception as e:
        print(f"   ❌ Integration error: {e}")
        return False


async def main():
    """Run all checks"""
    print("🔍 IRIS MILESTONE 2 - FULL DOUBLE CHECK")
    print("=" * 60)
    print("Systematic verification of the Prompt System")
    print()

    checks = [
        ("File Structure", check_file_structure),
        ("Imports", check_imports),
        ("Template Syntax", check_template_syntax),
        ("Context Builders", check_context_builders),
        ("Prompt Rendering", check_prompt_rendering),
        ("Personalization", check_personalization),
        ("Input Validation", check_validation),
        ("Dependencies", check_dependencies),
        ("Integration", check_integration)
    ]

    passed = 0
    total = len(checks)

    for check_name, check_func in checks:
        print(f"\n{'='*20} {check_name} {'='*20}")
        try:
            # check_integration is async; the rest are plain functions
            if asyncio.iscoroutinefunction(check_func):
                result = await check_func()
            else:
                result = check_func()

            if result:
                passed += 1
                print(f"✅ {check_name}: PASSED")
            else:
                print(f"❌ {check_name}: FAILED")

        except Exception as e:
            print(f"❌ {check_name}: ERROR - {e}")

    print(f"\n{'='*60}")
    print(f"🎯 DOUBLE CHECK RESULTS: {passed}/{total} checks passed")

    if passed == total:
        print("🎉 ALL CHECKS PASSED - MILESTONE 2 READY!")
        return True
    else:
        print("❌ Some checks failed - review required")
        return False


if __name__ == "__main__":
    success = asyncio.run(main())
    sys.exit(0 if success else 1)
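A note on invocation: the script prepends ../src (relative to its own location) to sys.path, so it is expected to sit one level below the repository root; the exact directory is not shown on this page, so the scripts/ path below is an assumption. The exit code is 0 when all nine checks pass and 1 otherwise, which makes the script usable as a CI gate:

python scripts/double_check_milestone2.py && echo "Milestone 2 verified"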

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/ilvolodel/iris-legacy'
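
The same endpoint can also be queried programmatically. A minimal Python sketch, assuming the endpoint returns a JSON body (the response schema is not documented on this page):

import json
import urllib.request

URL = "https://glama.ai/api/mcp/v1/servers/ilvolodel/iris-legacy"

# Fetch the server entry and parse it
with urllib.request.urlopen(URL) as resp:
    server_info = json.load(resp)  # assumption: JSON response body

print(json.dumps(server_info, indent=2))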

If you have feedback or need assistance with the MCP directory API, please join our Discord server.