#!/usr/bin/env python3
"""
Integration test for the Prompt System + LLM providers.
Exercises the rendered prompts against the real LLM providers.
"""
import sys
import os
import asyncio
from pathlib import Path
from datetime import datetime, timezone
# Add the src directory to the import path
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
from llm_core.prompts import (
create_general_chat_context, create_business_assistant_context,
create_email_analysis_context, render_prompt
)
from llm_core.providers import ProviderFactory, LLMMessage, MessageRole
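# Pre-flight credential check (illustrative sketch, not wired into main below).
# These tests call the real provider APIs, so missing credentials are the most
# common failure mode. The environment variable names are assumptions based on
# the standard Anthropic and OpenAI SDKs; adjust them if ProviderFactory loads
# its keys from somewhere else.
def missing_api_keys() -> list[str]:
    """Return the provider API key environment variables that appear to be unset."""
    expected = ["ANTHROPIC_API_KEY", "OPENAI_API_KEY"]  # assumed variable names
    return [name for name in expected if not os.getenv(name)]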
async def test_general_chat_with_llm():
"""Test chat generale con LLM"""
print("π¬ Testing General Chat with LLM...")
try:
# Build the context
context = create_general_chat_context(
user_id="test_user",
name="Marco Rossi",
session_id="test_session",
role="CEO",
company="TechCorp",
communication_style="professional"
)
# Render the system prompt
system_prompt = render_prompt(context)
# Test with Claude (Anthropic provider)
provider = ProviderFactory.create_provider("anthropic")
messages = [
LLMMessage(role=MessageRole.SYSTEM, content=system_prompt),
LLMMessage(role=MessageRole.USER, content="Hi IRIS! How can you help me today?")
]
response = await provider.chat(messages, max_tokens=200)
print("β
General chat test completed")
print(f"π€ User: Ciao IRIS! Come puoi aiutarmi oggi?")
print(f"π€ IRIS: {response.content}")
print("-" * 50)
return True
except Exception as e:
print(f"β General chat test failed: {e}")
return False
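# Shared call pattern (sketch): each test in this file builds a system prompt,
# wraps it with a user message, and calls provider.chat(). A helper like this
# could remove the duplication; it relies only on the ProviderFactory /
# LLMMessage / MessageRole API already used above and is deliberately not
# wired into the tests so they stay independent.
async def chat_once(provider_name: str, system_prompt: str, user_message: str, max_tokens: int = 200):
    """Send a single system + user exchange to the named provider and return the response."""
    provider = ProviderFactory.create_provider(provider_name)
    messages = [
        LLMMessage(role=MessageRole.SYSTEM, content=system_prompt),
        LLMMessage(role=MessageRole.USER, content=user_message),
    ]
    return await provider.chat(messages, max_tokens=max_tokens)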
async def test_business_assistant_with_llm():
"""Test assistente business con LLM"""
print("π’ Testing Business Assistant with LLM...")
try:
# Build a more complex business context
from llm_core.prompts.context import ContextBuilder
from llm_core.prompts.types import PromptType
context = (ContextBuilder()
.with_user("business_user", "Laura Bianchi",
role="CTO", company="InnovateTech SRL",
communication_style="casual",
expertise_level="expert")
.for_prompt_type(PromptType.BUSINESS_ASSISTANT)
.with_session("business_session")
.with_business_context(
company_info={
"name": "InnovateTech SRL",
"industry": "AI Software",
"size": "50 employees"
},
current_projects=[
{"name": "IRIS Assistant", "status": "in_progress", "priority": "high"}
],
kpis={"MRR": "β¬75,000", "Team Size": "12 developers"},
goals=["Lanciare IRIS entro Q2", "Crescita team del 50%"]
)
.build())
# Render the system prompt
system_prompt = render_prompt(context)
# Test with GPT-5 (OpenAI provider)
provider = ProviderFactory.create_provider("openai")
messages = [
LLMMessage(role=MessageRole.SYSTEM, content=system_prompt),
LLMMessage(role=MessageRole.USER, content="Analyze our current KPIs and suggest strategies to reach our Q2 goals.")
]
response = await provider.chat(messages, max_tokens=300)
print("β
Business assistant test completed")
print(f"π€ User: Analizza i nostri KPI attuali e suggerisci strategie...")
print(f"π€ IRIS: {response.content}")
print("-" * 50)
return True
except Exception as e:
print(f"β Business assistant test failed: {e}")
return False
async def test_email_analysis_with_llm():
"""Test analisi email con LLM"""
print("π§ Testing Email Analysis with LLM...")
try:
# Sample email
email_data = {
"sender": "investor@vcfund.com",
"recipient": "marco.rossi@techcorp.com",
"subject": "Follow-up: Investment Opportunity Discussion",
"content": """Dear Marco,
Thank you for the presentation yesterday. Our investment committee was impressed with TechCorp's AI capabilities and market traction.
We'd like to move forward with due diligence. Could you please provide:
1. Financial statements for the last 2 years
2. Customer acquisition metrics
3. Technical architecture documentation
4. Team structure and key personnel CVs
We're targeting a €2M Series A investment with a 6-month timeline.
Best regards,
Sarah Johnson
Partner, VCFund""",
"date": datetime.now(timezone.utc),
"priority": "high"
}
# Build the context
context = create_email_analysis_context(
user_id="email_user",
name="Marco Rossi",
session_id="email_session",
email_data=email_data,
role="CEO",
company="TechCorp",
expertise_level="expert"
)
# Render the system prompt
system_prompt = render_prompt(context)
# Test with Claude (Anthropic provider)
provider = ProviderFactory.create_provider("anthropic")
messages = [
LLMMessage(role=MessageRole.SYSTEM, content=system_prompt),
LLMMessage(role=MessageRole.USER, content="Analyze this email and provide strategic recommendations.")
]
response = await provider.chat(messages, max_tokens=400)
print("β
Email analysis test completed")
print(f"π€ User: Analizza questa email e fornisci raccomandazioni...")
print(f"π€ IRIS: {response.content}")
print("-" * 50)
return True
except Exception as e:
print(f"β Email analysis test failed: {e}")
return False
async def test_prompt_personalization():
"""Test personalizzazione prompt per diversi utenti"""
print("π¨ Testing Prompt Personalization...")
try:
# Same question, different users
user_message = "Explain what artificial intelligence is"
# Beginner user
context_beginner = create_general_chat_context(
user_id="beginner_user",
name="Mario Verdi",
session_id="beginner_session",
expertise_level="beginner",
communication_style="casual",
preferred_response_length="short"
)
# Expert user
context_expert = create_general_chat_context(
user_id="expert_user",
name="Dr. Anna Rossi",
session_id="expert_session",
expertise_level="expert",
communication_style="formal",
preferred_response_length="long"
)
provider = ProviderFactory.create_provider("anthropic")
# Test with the beginner user
prompt_beginner = render_prompt(context_beginner)
messages_beginner = [
LLMMessage(role=MessageRole.SYSTEM, content=prompt_beginner),
LLMMessage(role=MessageRole.USER, content=user_message)
]
response_beginner = await provider.chat(messages_beginner, max_tokens=150)
# Test with the expert user
prompt_expert = render_prompt(context_expert)
messages_expert = [
LLMMessage(role=MessageRole.SYSTEM, content=prompt_expert),
LLMMessage(role=MessageRole.USER, content=user_message)
]
response_expert = await provider.chat(messages_expert, max_tokens=300)
print("β
Personalization test completed")
print(f"π€ Question: {user_message}")
print(f"πΆ Beginner Response: {response_beginner.content}")
print(f"π Expert Response: {response_expert.content}")
print("-" * 50)
return True
except Exception as e:
print(f"β Personalization test failed: {e}")
return False
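# Console output helper (sketch): model responses can be long, so a truncated
# preview keeps the log readable. Purely illustrative and not called by the
# tests above; substitute it into the print statements if full responses are
# too noisy.
def preview(text: str, limit: int = 300) -> str:
    """Return text truncated to `limit` characters, with an ellipsis marker."""
    return text if len(text) <= limit else text[:limit].rstrip() + " [...]"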
async def main():
"""Esegue tutti i test di integrazione"""
print("π IRIS PROMPT + LLM INTEGRATION TEST")
print("=" * 60)
tests = [
test_general_chat_with_llm,
test_business_assistant_with_llm,
test_email_analysis_with_llm,
test_prompt_personalization
]
passed = 0
total = len(tests)
for test_func in tests:
try:
if await test_func():
passed += 1
except Exception as e:
print(f"β Test {test_func.__name__} failed with exception: {e}")
print(f"\nπ― INTEGRATION TEST RESULTS: {passed}/{total} passed")
if passed == total:
print("π All integration tests passed!")
return True
else:
print("β Some integration tests failed")
return False
if __name__ == "__main__":
success = asyncio.run(main())
sys.exit(0 if success else 1)