test_web_search.py • 7.96 kB
""" Test E2E Web Search Integration """ import asyncio import logging from datetime import datetime from src.llm_core.client import IRISClient from src.llm_core.providers.base import MessageRole from src.llm_core.prompts import PromptType # Setup logging logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' ) logger = logging.getLogger(__name__) async def test_web_search(): """Test smart routing e web search OpenAI""" logger.info("=" * 80) logger.info("πŸ§ͺ TEST WEB SEARCH INTEGRATION") logger.info("=" * 80) # Inizializza IRIS Client con Claude default client = IRISClient(default_provider="anthropic") # Start conversation user_id = "test_user_web_search" user_name = "Test User" session = await client.start_conversation( user_id=user_id, user_name=user_name, prompt_type=PromptType.BUSINESS_ASSISTANT ) logger.info(f"βœ… Conversation started: {session.session_id}") # Test cases test_queries = [ { "query": "Ciao, come stai?", "expected_provider": "anthropic", "expected_search": False, "description": "Query normale - Claude (no search)" }, { "query": "Cerca le ultime notizie su intelligenza artificiale", "expected_provider": "openai", "expected_search": True, "description": "Query con ricerca - GPT-4o + web search" }, { "query": "Trova informazioni recenti su OpenAI GPT-5", "expected_provider": "openai", "expected_search": True, "description": "Query con ricerca - GPT-4o + web search" }, { "query": "Riassumi quello che hai trovato", "expected_provider": "anthropic", "expected_search": False, "description": "Follow-up senza search - Claude" } ] results = [] for i, test in enumerate(test_queries, 1): logger.info(f"\n{'─' * 80}") logger.info(f"TEST {i}/{len(test_queries)}: {test['description']}") logger.info(f"Query: {test['query']}") logger.info(f"Expected: {test['expected_provider']} | Search: {test['expected_search']}") try: # Send message response = await client.chat( session_id=session.session_id, message=test['query'], enable_smart_routing=True ) # Check results actual_provider = response.metadata.get("provider", response.provider) has_web_search = response.metadata.get("web_search", False) routing_info = response.metadata.get("routing") logger.info(f"βœ… Response received") logger.info(f" Provider: {actual_provider}") logger.info(f" Model: {response.model}") logger.info(f" Web search: {has_web_search}") logger.info(f" Tokens: {response.usage.get('total_tokens', 'N/A')}") if routing_info: logger.info(f" Routing: {routing_info['reason']}") logger.info(f" Response preview: {response.content[:150]}...") # Validate success = True errors = [] # Note: Il provider nei metadata potrebbe essere quello default se non specificato # Quindi controlliamo il model usato per verificare if has_web_search != test['expected_search']: success = False errors.append(f"Web search mismatch: expected {test['expected_search']}, got {has_web_search}") if has_web_search and response.model != "gpt-4o": success = False errors.append(f"Web search should use gpt-4o, got {response.model}") results.append({ "test": test['description'], "success": success, "errors": errors, "provider": actual_provider, "model": response.model, "web_search": has_web_search }) if success: logger.info("βœ… TEST PASSED") else: logger.error(f"❌ TEST FAILED: {', '.join(errors)}") except Exception as e: logger.error(f"❌ TEST ERROR: {e}") results.append({ "test": test['description'], "success": False, "errors": [str(e)] }) # Summary logger.info(f"\n{'=' * 80}") logger.info("πŸ“Š TEST SUMMARY") 
logger.info(f"{'=' * 80}") passed = sum(1 for r in results if r['success']) total = len(results) for i, result in enumerate(results, 1): status = "βœ… PASS" if result['success'] else "❌ FAIL" logger.info(f"{i}. {status} - {result['test']}") if not result['success']: for error in result.get('errors', []): logger.info(f" Error: {error}") logger.info(f"\n{'=' * 80}") logger.info(f"RESULT: {passed}/{total} tests passed ({passed/total*100:.1f}%)") logger.info(f"{'=' * 80}") return passed == total async def test_web_search_simple(): """Test semplice solo web search""" logger.info("=" * 80) logger.info("πŸ§ͺ TEST SIMPLE WEB SEARCH") logger.info("=" * 80) from src.llm_core.providers import ProviderFactory from src.llm_core.providers.base import LLMMessage, MessageRole # Test diretto OpenAI provider provider = ProviderFactory.create_provider("openai") messages = [ LLMMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."), LLMMessage(role=MessageRole.USER, content="Cerca le ultime notizie su AI regulation in Europa") ] logger.info("πŸ” Testing OpenAI web search directly...") try: response = await provider.chat_with_search( messages=messages, search_context_size="medium", user_location={"country": "IT"} ) logger.info(f"βœ… Web search successful!") logger.info(f" Model: {response.model}") logger.info(f" Tokens: {response.usage.get('total_tokens', 'N/A')}") logger.info(f" Web search: {response.metadata.get('web_search', False)}") logger.info(f" Response: {response.content[:200]}...") return True except Exception as e: logger.error(f"❌ Web search failed: {e}") return False async def main(): """Run all tests""" logger.info("πŸš€ Starting Web Search Integration Tests") logger.info(f"Time: {datetime.now().isoformat()}\n") # Test 1: Simple web search logger.info("\n" + "=" * 80) logger.info("TEST 1: Simple Web Search (Direct Provider)") logger.info("=" * 80) test1_passed = await test_web_search_simple() # Test 2: Smart routing logger.info("\n" + "=" * 80) logger.info("TEST 2: Smart Routing with IRIS Client") logger.info("=" * 80) test2_passed = await test_web_search() # Final summary logger.info("\n" + "=" * 80) logger.info("🎯 FINAL SUMMARY") logger.info("=" * 80) logger.info(f"Test 1 (Simple Web Search): {'βœ… PASSED' if test1_passed else '❌ FAILED'}") logger.info(f"Test 2 (Smart Routing): {'βœ… PASSED' if test2_passed else '❌ FAILED'}") all_passed = test1_passed and test2_passed logger.info(f"\nOVERALL: {'βœ… ALL TESTS PASSED' if all_passed else '❌ SOME TESTS FAILED'}") logger.info("=" * 80) return all_passed if __name__ == "__main__": success = asyncio.run(main()) exit(0 if success else 1)
