#!/usr/bin/env python3
"""
Test LLM Provider Structure
Test provider classes without making API calls
"""
import sys
import os
# Add project root to path
# NOTE: this must run BEFORE the `src.*` imports below, since they resolve
# relative to the project root, not this test directory.
project_root = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, project_root)
from src.llm_core.providers.factory import ProviderFactory, ProviderType
from src.llm_core.providers.base import LLMMessage, MessageRole, LLMResponse
from src.llm_core.providers.openai_provider import OpenAIProvider
from src.llm_core.providers.anthropic_provider import AnthropicProvider
import logging
# Setup logging
# Module-level logger shared by all test functions in this script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def test_message_classes():
    """Test LLMMessage and LLMResponse classes.

    Verifies round-trip serialization (to_dict / from_dict) of LLMMessage
    and serialization of LLMResponse. Returns True on success, False on
    any failure (errors are logged, not raised, so main() can summarize).
    """
    logger.info("🧪 Testing Message Classes...")
    try:
        # Test LLMMessage
        message = LLMMessage(
            role=MessageRole.USER,
            content="Hello, world!",
            metadata={"timestamp": "2025-01-01T00:00:00Z"}
        )
        # Test serialization
        message_dict = message.to_dict()
        assert message_dict["role"] == "user"
        assert message_dict["content"] == "Hello, world!"
        assert message_dict["metadata"]["timestamp"] == "2025-01-01T00:00:00Z"
        logger.info("✅ LLMMessage serialization works")
        # Test deserialization
        restored_message = LLMMessage.from_dict(message_dict)
        assert restored_message.role == MessageRole.USER
        assert restored_message.content == "Hello, world!"
        logger.info("✅ LLMMessage deserialization works")
        # Test LLMResponse
        response = LLMResponse(
            content="Hello back!",
            model="gpt-4o-mini",
            provider="openai",
            usage={"total_tokens": 10},
            metadata={"finish_reason": "stop"}
        )
        response_dict = response.to_dict()
        assert response_dict["content"] == "Hello back!"
        assert response_dict["model"] == "gpt-4o-mini"
        assert response_dict["provider"] == "openai"
        logger.info("✅ LLMResponse serialization works")
        return True
    except Exception as e:
        # Broad catch is deliberate: any assertion or attribute error should
        # be reported as a failed test, not crash the whole suite.
        logger.error(f"❌ Message classes test failed: {e}")
        return False
def test_provider_classes():
    """Test provider class structure.

    Exercises OpenAI/Anthropic provider metadata, token counting, and
    message validation using a fake API key (no network calls are made).
    Returns True on success, False on failure.
    """
    logger.info("🏗️ Testing Provider Classes...")
    try:
        # Test OpenAI provider structure
        fake_api_key = "sk-fake-key-for-testing"
        openai_provider = OpenAIProvider(api_key=fake_api_key)
        assert openai_provider.get_provider_name() == "openai"
        assert openai_provider.get_default_model() == "gpt-4o-mini"
        assert "gpt-4o" in openai_provider.get_available_models()
        logger.info("✅ OpenAI provider structure correct")
        # Test Anthropic provider structure
        anthropic_provider = AnthropicProvider(api_key=fake_api_key)
        assert anthropic_provider.get_provider_name() == "anthropic"
        assert "claude" in anthropic_provider.get_default_model()
        assert len(anthropic_provider.get_available_models()) > 0
        logger.info("✅ Anthropic provider structure correct")
        # Test token counting
        test_text = "Hello, this is a test message for token counting."
        openai_tokens = openai_provider.count_tokens(test_text)
        anthropic_tokens = anthropic_provider.count_tokens(test_text)
        assert openai_tokens > 0
        assert anthropic_tokens > 0
        logger.info(f"✅ Token counting: OpenAI={openai_tokens}, Anthropic={anthropic_tokens}")
        # Test message validation
        valid_messages = [
            LLMMessage(role=MessageRole.USER, content="Hello"),
            LLMMessage(role=MessageRole.ASSISTANT, content="Hi there!")
        ]
        openai_provider.validate_messages(valid_messages)
        anthropic_provider.validate_messages(valid_messages)
        logger.info("✅ Message validation works")
        # Test invalid messages: empty content must raise ValueError
        try:
            invalid_messages = [LLMMessage(role=MessageRole.USER, content="")]
            openai_provider.validate_messages(invalid_messages)
            assert False, "Should have raised validation error"
        except ValueError:
            logger.info("✅ Invalid message validation works")
        return True
    except Exception as e:
        logger.error(f"❌ Provider classes test failed: {e}")
        return False
def test_factory_structure():
    """Test factory structure without API calls.

    Checks ProviderType string values, the factory's internal registry,
    and the shape of get_available_providers() output.
    Returns True on success, False on failure.
    """
    logger.info("🏭 Testing Factory Structure...")
    try:
        # Test provider types (enum members compare equal to their string values)
        assert ProviderType.OPENAI == "openai"
        assert ProviderType.ANTHROPIC == "anthropic"
        logger.info("✅ Provider types defined correctly")
        # Test factory registry
        assert ProviderType.OPENAI in ProviderFactory._providers
        assert ProviderType.ANTHROPIC in ProviderFactory._providers
        logger.info("✅ Factory registry contains providers")
        # Test available providers (without API keys)
        available = ProviderFactory.get_available_providers()
        assert "openai" in available
        assert "anthropic" in available
        # Check provider info structure
        for provider_name, info in available.items():
            assert "available" in info
            assert "default_model" in info
            assert "models" in info
            logger.info(f"✅ {provider_name}: Structure correct (available: {info['available']})")
        return True
    except Exception as e:
        logger.error(f"❌ Factory structure test failed: {e}")
        return False
def test_message_conversion():
    """Test message format conversion.

    Verifies each provider's private _convert_messages(): OpenAI keeps the
    system message inline; Anthropic splits it out and returns
    (system_text, conversation_messages). Returns True/False.
    """
    logger.info("🔄 Testing Message Conversion...")
    try:
        fake_api_key = "sk-fake-key-for-testing"
        # Test OpenAI message conversion
        openai_provider = OpenAIProvider(api_key=fake_api_key)
        messages = [
            LLMMessage(role=MessageRole.SYSTEM, content="You are helpful."),
            LLMMessage(role=MessageRole.USER, content="Hello!"),
            LLMMessage(role=MessageRole.ASSISTANT, content="Hi there!")
        ]
        openai_messages = openai_provider._convert_messages(messages)
        assert len(openai_messages) == 3
        assert openai_messages[0]["role"] == "system"
        assert openai_messages[1]["role"] == "user"
        assert openai_messages[2]["role"] == "assistant"
        logger.info("✅ OpenAI message conversion works")
        # Test Anthropic message conversion
        anthropic_provider = AnthropicProvider(api_key=fake_api_key)
        system_msg, conv_messages = anthropic_provider._convert_messages(messages)
        assert system_msg == "You are helpful."
        assert len(conv_messages) == 2  # System message separated
        assert conv_messages[0]["role"] == "user"
        assert conv_messages[1]["role"] == "assistant"
        logger.info("✅ Anthropic message conversion works")
        return True
    except Exception as e:
        logger.error(f"❌ Message conversion test failed: {e}")
        return False
def main():
    """Run all structure tests.

    Executes each test function in order, logs a per-test and overall
    summary, and returns True only if every test passed.
    """
    try:
        logger.info("🚀 Starting LLM Provider Structure Tests...")
        tests = [
            ("Message Classes", test_message_classes),
            ("Provider Classes", test_provider_classes),
            ("Factory Structure", test_factory_structure),
            ("Message Conversion", test_message_conversion)
        ]
        results = {}
        for test_name, test_func in tests:
            logger.info(f"\n--- {test_name} ---")
            results[test_name] = test_func()
        # Summary
        passed = sum(1 for result in results.values() if result)
        total = len(results)
        logger.info("\n" + "=" * 50)
        logger.info(f"📊 Structure Tests Complete: {passed}/{total} passed")
        for test_name, result in results.items():
            status = "✅" if result else "❌"
            logger.info(f"  {status} {test_name}")
        if passed == total:
            logger.info("\n✅ All LLM provider structures are working correctly!")
            logger.info("💡 To test actual API calls, configure OPENAI_API_KEY or ANTHROPIC_API_KEY in .env")
        return passed == total
    except Exception as e:
        # Catch-all so an unexpected crash still produces a clean exit code.
        logger.error(f"❌ Structure tests failed: {e}")
        return False
if __name__ == "__main__":
    # Exit code 0 on full success, 1 if any structure test failed.
    sys.exit(0 if main() else 1)