"""
End-to-End Integration Tests for IRIS System
Tests the complete integration of all core components:
- Redis connection and data persistence
- LLM providers (OpenAI and Anthropic)
- Conversation management
- Prompt system with Jinja2 templates
- Complete chat workflow
"""
import pytest
import asyncio
import os
from typing import Dict, Any
from src.llm_core.client import IRISClient, get_iris_client, quick_chat
from src.llm_core.models import PromptType, MessageRole
from src.redis_client.client import RedisClient
class TestE2EIntegration:
"""End-to-End Integration Tests"""
@pytest.fixture(autouse=True)
async def setup_and_cleanup(self):
"""Setup and cleanup for each test"""
# Setup
self.client = get_iris_client()
await self.client._ensure_redis_connected()
# Store test session IDs for cleanup
self.test_sessions = []
yield
# Cleanup - remove test sessions
for session_id in self.test_sessions:
try:
await self.client.end_conversation(session_id)
except Exception:
pass # Ignore cleanup errors
@pytest.mark.asyncio
async def test_redis_connection_and_persistence(self):
"""Test Redis connection and data persistence"""
# Test Redis connection
redis_client = RedisClient()
await redis_client.connect()
# Test basic operations
test_key = "test:e2e:redis"
test_value = "test_value_123"
# Set and get
await redis_client.set(test_key, test_value, ttl=60)
retrieved = await redis_client.get(test_key)
assert retrieved == test_value
# Hash operations
hash_key = "test:e2e:hash"
hash_data = {"field1": "value1", "field2": "value2"}
await redis_client.hmset(hash_key, hash_data)
retrieved_hash = await redis_client.hgetall(hash_key)
assert retrieved_hash == hash_data
# Cleanup
await redis_client.delete(test_key)
await redis_client.delete(hash_key)
await redis_client.close()
@pytest.mark.asyncio
async def test_llm_providers_health(self):
"""Test all LLM providers health checks"""
# Test OpenAI provider
openai_health = await self.client.check_provider_health("openai")
assert openai_health["status"] == "healthy"
assert openai_health["provider"] == "openai"
# Test Anthropic provider
anthropic_health = await self.client.check_provider_health("anthropic")
assert anthropic_health["status"] == "healthy"
assert anthropic_health["provider"] == "anthropic"
@pytest.mark.asyncio
async def test_conversation_lifecycle(self):
"""Test complete conversation lifecycle"""
# Start conversation
session = await self.client.start_conversation(
user_id="test_user_e2e",
user_name="E2E Test User",
prompt_type=PromptType.GENERAL_CHAT,
role="Developer",
expertise_level="advanced",
communication_style="professional"
)
self.test_sessions.append(session.session_id)
assert session.user_id == "test_user_e2e"
assert session.user_name == "E2E Test User"
assert session.prompt_type == PromptType.GENERAL_CHAT
# Send first message
response1 = await self.client.chat(
session.session_id,
"Hello! Can you help me with Python programming?"
)
assert response1.content
assert response1.provider in ["openai", "anthropic"]
# Send follow-up message
response2 = await self.client.chat(
session.session_id,
"What are the best practices for async programming?"
)
assert response2.content
# Check conversation history
messages = await self.client.get_conversation_history(session.session_id)
assert len(messages) >= 4 # 2 user messages + 2 assistant responses
# Verify message order and content
user_messages = [msg for msg in messages if msg.role == MessageRole.USER]
assistant_messages = [msg for msg in messages if msg.role == MessageRole.ASSISTANT]
assert len(user_messages) == 2
assert len(assistant_messages) == 2
assert "Python programming" in user_messages[0].content
assert "async programming" in user_messages[1].content
@pytest.mark.asyncio
async def test_prompt_system_integration(self):
"""Test prompt system with different templates"""
# Test different prompt types
prompt_types = [
PromptType.GENERAL_CHAT,
PromptType.TECHNICAL_SUPPORT,
PromptType.CREATIVE_WRITING,
PromptType.BUSINESS_ANALYSIS
]
for prompt_type in prompt_types:
session = await self.client.start_conversation(
user_id=f"test_user_{prompt_type.value}",
user_name="Prompt Test User",
prompt_type=prompt_type,
role="Tester",
expertise_level="intermediate"
)
self.test_sessions.append(session.session_id)
# Send a message to trigger prompt generation
response = await self.client.chat(
session.session_id,
f"Test message for {prompt_type.value} prompt type"
)
assert response.content
assert len(response.content) > 10 # Ensure meaningful response
@pytest.mark.asyncio
async def test_session_metadata_and_stats(self):
"""Test session metadata and statistics"""
# Create session with metadata
session = await self.client.start_conversation(
user_id="test_metadata_user",
user_name="Metadata Test User",
prompt_type=PromptType.GENERAL_CHAT,
role="QA Engineer",
expertise_level="expert",
communication_style="technical",
custom_field="test_value"
)
self.test_sessions.append(session.session_id)
# Send some messages to generate stats
await self.client.chat(session.session_id, "First test message")
await self.client.chat(session.session_id, "Second test message")
await self.client.chat(session.session_id, "Third test message")
# Check session stats
stats = await self.client.get_session_stats(session.session_id)
assert stats is not None
assert "message_count" in stats
assert stats["message_count"] >= 6 # 3 user + 3 assistant messages
@pytest.mark.asyncio
async def test_provider_switching(self):
"""Test switching between different LLM providers"""
session = await self.client.start_conversation(
user_id="test_provider_switch",
user_name="Provider Switch User",
prompt_type=PromptType.GENERAL_CHAT
)
self.test_sessions.append(session.session_id)
# Test with OpenAI
response_openai = await self.client.chat(
session.session_id,
"Hello from OpenAI!",
provider_name="openai"
)
assert response_openai.provider == "openai"
# Test with Anthropic
response_anthropic = await self.client.chat(
session.session_id,
"Hello from Anthropic!",
provider_name="anthropic"
)
assert response_anthropic.provider == "anthropic"
# Verify both responses in conversation history
messages = await self.client.get_conversation_history(session.session_id)
assistant_messages = [msg for msg in messages if msg.role == MessageRole.ASSISTANT]
assert len(assistant_messages) >= 2
@pytest.mark.asyncio
async def test_quick_chat_convenience_function(self):
"""Test the quick_chat convenience function"""
response = await quick_chat(
user_id="quick_chat_user",
user_name="Quick Chat User",
message="This is a quick chat test message",
prompt_type=PromptType.GENERAL_CHAT,
role="User",
expertise_level="beginner"
)
assert response
assert len(response) > 10
assert isinstance(response, str)
@pytest.mark.asyncio
async def test_error_handling_and_recovery(self):
"""Test error handling and system recovery"""
# Test with invalid session ID
try:
await self.client.chat("invalid_session_id", "Test message")
assert False, "Should have raised an exception"
except Exception as e:
assert "not found" in str(e).lower() or "invalid" in str(e).lower()
# Test with invalid provider
session = await self.client.start_conversation(
user_id="error_test_user",
user_name="Error Test User",
prompt_type=PromptType.GENERAL_CHAT
)
self.test_sessions.append(session.session_id)
try:
await self.client.chat(
session.session_id,
"Test message",
provider_name="invalid_provider"
)
assert False, "Should have raised an exception"
except Exception as e:
assert "provider" in str(e).lower()
@pytest.mark.asyncio
async def test_concurrent_conversations(self):
"""Test handling multiple concurrent conversations"""
# Create multiple sessions
sessions = []
for i in range(3):
session = await self.client.start_conversation(
user_id=f"concurrent_user_{i}",
user_name=f"Concurrent User {i}",
prompt_type=PromptType.GENERAL_CHAT
)
sessions.append(session)
self.test_sessions.append(session.session_id)
# Send messages concurrently
tasks = []
for i, session in enumerate(sessions):
task = self.client.chat(
session.session_id,
f"Concurrent message from user {i}"
)
tasks.append(task)
# Wait for all responses
responses = await asyncio.gather(*tasks)
# Verify all responses
assert len(responses) == 3
for response in responses:
assert response.content
assert len(response.content) > 10
@pytest.mark.asyncio
async def test_system_health_check(self):
"""Test overall system health"""
# Check Redis connection
await self.client._ensure_redis_connected()
# Check all providers
providers = ["openai", "anthropic"]
for provider in providers:
health = await self.client.check_provider_health(provider)
assert health["status"] == "healthy"
# Test basic functionality
response = await quick_chat(
user_id="health_check_user",
user_name="Health Check User",
message="System health check message",
prompt_type=PromptType.GENERAL_CHAT
)
assert response
assert len(response) > 5
if __name__ == "__main__":
# Run tests directly
pytest.main([__file__, "-v"])