"""LiteLLM client wrapper for multi-provider AI support."""
from typing import Optional

import litellm


class LLMClient:
    """Wrapper around LiteLLM that supports multiple AI providers."""

    def __init__(self, default_provider: str = "gemini"):
        """Initialize the LLM client with a default provider.

        Args:
            default_provider: Default AI provider to use ("gemini", "claude", or "openai").
        """
        self.default_provider = default_provider
    def complete(self,
                 prompt: str,
                 provider: Optional[str] = None,
                 temperature: float = 0.3,
                 json_mode: bool = False) -> str:
        """Send a prompt to the LLM and return its response.

        Args:
            prompt: The prompt to send to the LLM.
            provider: AI provider to use for this call (overrides the default).
            temperature: Sampling temperature (0.0-1.0).
            json_mode: Request structured JSON output; use this whenever the
                caller expects a JSON response.

        Returns:
            The LLM's response as a string.
        """
        provider = provider or self.default_provider

        # Map provider names to LiteLLM model names
        model_map = {
            # Anthropic Claude (requires ANTHROPIC_API_KEY)
            "claude": "claude-sonnet-4-5",
            "claude-sonnet": "claude-sonnet-4-5",
            "claude-opus": "claude-opus-4",
            "claude-haiku": "claude-3-haiku",
            # OpenAI (requires OPENAI_API_KEY)
            "openai": "gpt-5",
            "gpt-4o": "gpt-4o",
            # Google Gemini (requires GOOGLE_API_KEY)
            "gemini": "gemini/gemini-2.5-flash",
            "gemini-flash": "gemini/gemini-2.5-flash",
            "gemini-pro": "gemini/gemini-2.5-pro",
        }
        # Fall back to Gemini Flash for unrecognized provider names, keeping
        # the "gemini/" prefix that the mapped Gemini entries use.
        model = model_map.get(provider, "gemini/gemini-2.5-flash")

        # Build completion arguments
        completion_args = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": temperature,
        }

        # Enable JSON mode for structured output
        if json_mode:
            completion_args["response_format"] = {"type": "json_object"}
            print("[LLM_CLIENT] JSON mode enabled")
print(f"[LLM_CLIENT] Calling model: {model}")
print(f"[LLM_CLIENT] Prompt length: {len(prompt)} chars")
print(f"[LLM_CLIENT] Temperature: {temperature}")
response = litellm.completion(**completion_args)
content = response.choices[0].message.content
print(f"[LLM_CLIENT] Response received, length: {len(content)} chars")
return content
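

# Usage sketch (not part of the module's API): a minimal demo of the wrapper,
# assuming the relevant API key is set in the environment (GOOGLE_API_KEY for
# the default Gemini provider, OPENAI_API_KEY for the second call). The prompt
# text is purely illustrative.
if __name__ == "__main__":
    client = LLMClient()  # defaults to the "gemini" provider

    # Plain-text completion with the default provider.
    answer = client.complete("Name three uses for a paperclip.")
    print(answer)

    # Structured JSON output from a specific provider, overriding the default.
    data = client.complete(
        "Return a JSON object with keys 'name' and 'color' for a fruit.",
        provider="openai",
        json_mode=True,
    )
    print(data)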