"""
LLM Provider Factory.
This module provides a factory function to instantiate the correct LLM provider
based on configuration. This is the ONLY place where provider-specific code
is imported, keeping the rest of the application provider-agnostic.
Design Pattern: Factory Pattern
- Centralizes provider instantiation logic
- Makes it easy to add new providers
- Keeps provider imports isolated
"""
from __future__ import annotations
from typing import Dict, Any
from src.config import settings
from .base import BaseLLMProvider
def get_llm_provider() -> BaseLLMProvider:
    """
    Factory function to create the configured LLM provider.

    Reads ``settings.llm_provider`` (the LLM_PROVIDER environment variable)
    and instantiates the appropriate provider class with the correct
    configuration.

    Returns:
        BaseLLMProvider: Configured provider instance

    Raises:
        ValueError: If provider is not supported or configuration is invalid

    Example:
        ```python
        # In your application code:
        provider = get_llm_provider()
        response = await provider.generate(messages=[...])
        # That's it! No need to know which provider is being used.
        # Switch providers by changing LLM_PROVIDER env var.
        ```
    """
    # Dispatch table instead of an if/elif chain: adding a provider is one
    # line here, and the error message below always lists exactly the
    # providers this table supports (they can never drift apart).
    factories = {
        "openai": _create_openai_provider,
        "anthropic": _create_anthropic_provider,
        "google": _create_google_provider,
        "azure_openai": _create_azure_openai_provider,
    }
    provider_name = settings.llm_provider.lower()
    try:
        factory = factories[provider_name]
    except KeyError:
        # `from None`: the KeyError is an implementation detail, not a cause
        # callers should see in the traceback.
        raise ValueError(
            f"Unsupported LLM provider: {provider_name}. "
            f"Supported providers: {', '.join(factories)}"
        ) from None
    return factory()
def _create_openai_provider() -> BaseLLMProvider:
    """Build an OpenAIProvider from application settings.

    Raises:
        ValueError: If no OpenAI API key is configured.
    """
    # Import here so the openai package is only required when selected.
    from .openai import OpenAIProvider

    # Fail fast with a clear message instead of a cryptic auth error later.
    if not settings.openai_api_key:
        raise ValueError("OPENAI_API_KEY is required when using OpenAI provider")

    return OpenAIProvider(
        {
            "api_key": settings.openai_api_key,
            "model": settings.openai_model,
            "temperature": settings.openai_temperature,
            "max_tokens": settings.openai_max_tokens,
        }
    )
def _create_anthropic_provider() -> BaseLLMProvider:
    """Build an AnthropicProvider from application settings.

    Raises:
        ValueError: If no Anthropic API key is configured.
    """
    # Import here so the anthropic package is only required when selected.
    from .anthropic import AnthropicProvider

    # Fail fast with a clear message instead of a cryptic auth error later.
    if not settings.anthropic_api_key:
        raise ValueError("ANTHROPIC_API_KEY is required when using Anthropic provider")

    return AnthropicProvider(
        {
            "api_key": settings.anthropic_api_key,
            "model": settings.anthropic_model,
            "max_tokens": settings.anthropic_max_tokens,
        }
    )
def _create_google_provider() -> BaseLLMProvider:
    """Build a GoogleProvider from application settings.

    Raises:
        ValueError: If no Google API key is configured.
    """
    # Import here so the Google SDK is only required when selected.
    from .google import GoogleProvider

    # Fail fast with a clear message instead of a cryptic auth error later.
    if not settings.google_api_key:
        raise ValueError("GOOGLE_API_KEY is required when using Google provider")

    return GoogleProvider(
        {
            "api_key": settings.google_api_key,
            "model": settings.google_model,
            "max_tokens": settings.google_max_tokens,
        }
    )
def _create_azure_openai_provider() -> BaseLLMProvider:
    """
    Build an OpenAIProvider wired to Azure OpenAI.

    Azure OpenAI speaks the same API as OpenAI but uses different endpoints,
    so we reuse OpenAIProvider and swap in an Azure-specific async client.

    Raises:
        ValueError: If any required Azure OpenAI setting is missing.
    """
    # Imports here so the openai package is only required when selected.
    from .openai import OpenAIProvider
    from openai import AsyncAzureOpenAI

    required = (
        settings.azure_openai_api_key,
        settings.azure_openai_endpoint,
        settings.azure_openai_deployment,
    )
    if not all(required):
        raise ValueError(
            "AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_OPENAI_DEPLOYMENT "
            "are required when using Azure OpenAI provider"
        )

    provider = OpenAIProvider(
        {
            "api_key": settings.azure_openai_api_key,
            # Azure addresses models by deployment name, not model name.
            "model": settings.azure_openai_deployment,
            # Reuses the generic OpenAI tuning knobs; there are no
            # Azure-specific temperature/max_tokens settings in config.
            "temperature": settings.openai_temperature,
            "max_tokens": settings.openai_max_tokens,
        }
    )
    # Replace the client with the Azure-specific one.
    # NOTE(review): the client OpenAIProvider.__init__ created is discarded
    # here without being closed — presumably its HTTP pool is lazily opened
    # and this is harmless, but confirm against OpenAIProvider internals.
    provider.client = AsyncAzureOpenAI(
        api_key=settings.azure_openai_api_key,
        azure_endpoint=settings.azure_openai_endpoint,
        api_version=settings.azure_openai_api_version,
    )
    return provider
def get_provider_info() -> Dict[str, Any]:
    """
    Describe the currently configured provider.

    Useful for logging, debugging, and monitoring.

    Returns:
        dict: Provider name, model, and generation defaults (``None`` when a
        provider does not expose ``default_temperature``/``default_max_tokens``).
    """
    provider = get_llm_provider()
    # getattr with a default: not every provider defines these attributes.
    generation_defaults = {
        "temperature": getattr(provider, "default_temperature", None),
        "max_tokens": getattr(provider, "default_max_tokens", None),
    }
    return {
        "provider": provider.provider_name,
        "model": provider.model_name,
        "config": generation_defaults,
    }