"""Streamlit UI for 42crunch MCP Server with LangChain."""
import streamlit as st
import os
import json
from pathlib import Path
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.prebuilt import create_react_agent
from mcp_tools import get_mcp_tools
# Multi-provider imports
try:
from langchain_anthropic import ChatAnthropic
except ImportError:
ChatAnthropic = None
try:
from langchain_google_genai import ChatGoogleGenerativeAI
except ImportError:
ChatGoogleGenerativeAI = None
# Load environment variables: the ui/.env file first, then (without
# overriding anything already set) the project-root .env if present.
ui_env = Path(__file__).parent / ".env"
load_dotenv(dotenv_path=ui_env)

project_env = ui_env.parent.parent / ".env"
if project_env.exists():
    load_dotenv(dotenv_path=project_env, override=False)
def _clean_key(raw: str) -> str:
    """Strip surrounding whitespace/quotes and drop anything after a line break."""
    key = raw.strip().strip('"').strip("'").strip()
    return key.split('\n')[0].split('\r')[0]


def get_api_key(provider: str) -> str:
    """Return the API key for *provider* ("openai", "claude", or "gemini").

    Lookup order:
      1. The provider's environment variable (which includes values loaded
         from ui/.env and the project .env by dotenv at import time).
      2. A ``~/.ai_tokens`` file, accepting ``ENV_VAR=key`` or
         ``export ENV_VAR=key`` lines, or — for OpenAI only — a bare
         ``sk-...`` line.

    Returns an empty string when no key is found. Unknown providers fall
    back to the OpenAI environment variable.
    """
    env_var_map = {
        "openai": "OPENAI_API_KEY",
        "claude": "ANTHROPIC_API_KEY",
        "gemini": "GOOGLE_API_KEY",
    }
    env_var = env_var_map.get(provider.lower(), "OPENAI_API_KEY")

    # 1) Environment variable (dotenv has already populated os.environ).
    api_key = os.getenv(env_var, "")
    if api_key:
        return _clean_key(api_key)

    # 2) ~/.ai_tokens file.
    ai_tokens_path = Path.home() / ".ai_tokens"
    if ai_tokens_path.exists():
        try:
            content = ai_tokens_path.read_text().strip()
            lines = [line.strip() for line in content.split('\n') if line.strip()]
            # Format 1: ENV_VAR=key or export ENV_VAR=key
            # (lines are stripped first so indented entries also match)
            for line in lines:
                if line.startswith((f'{env_var}=', f'export {env_var}=')):
                    key = _clean_key(line.split('=', 1)[1])
                    if key:
                        return key
            # Format 2: for OpenAI, accept a bare sk-... key line.
            if provider.lower() == "openai":
                for line in lines:
                    if line.startswith('sk-'):
                        return line
        except OSError as e:
            # Narrowed from a bare except: only file-access errors are expected.
            # Only show the warning if running inside a Streamlit context.
            try:
                st.warning(f"Could not read ~/.ai_tokens: {e}")
            except Exception:
                pass
    return ""
def get_openai_api_key() -> str:
    """Legacy alias kept for backward compatibility; delegates to get_api_key."""
    provider = "openai"
    return get_api_key(provider)
# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="42crunch MCP Assistant",
    page_icon="π",  # NOTE(review): looks like a mojibake'd emoji — confirm intended icon
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS injected once per rerun; styles the .main-header/.sub-header
# divs rendered in main() and the (currently unused) .tool-info class.
st.markdown("""
<style>
.main-header {
font-size: 2.5rem;
font-weight: bold;
color: #1f77b4;
margin-bottom: 1rem;
}
.sub-header {
font-size: 1.2rem;
color: #666;
margin-bottom: 2rem;
}
.tool-info {
background-color: #f0f2f6;
padding: 1rem;
border-radius: 0.5rem;
margin: 1rem 0;
}
</style>
""", unsafe_allow_html=True)
def initialize_session_state():
    """Seed st.session_state with every key the app reads, if not yet set."""
    defaults = {
        "messages": [],       # rendered chat transcript
        "agent": None,        # cached LangGraph agent
        "chat_history": [],   # history fed back to the agent
        "agent_key": None,    # provider/key/model fingerprint of the cached agent
    }
    for key, initial in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = initial
def create_llm(provider: str, api_key: str, model_name: str):
    """Instantiate a streaming chat model for the requested provider.

    Args:
        provider: "openai", "claude", or "gemini" (case-insensitive).
        api_key: Raw API key; surrounding whitespace/quotes are stripped here.
        model_name: Provider-specific model identifier.

    Raises:
        ValueError: if the key is empty/too short or the provider is unknown.
        ImportError: if the provider's LangChain package is not installed.
    """
    cleaned = api_key.strip().strip('"').strip("'")
    # Cheap sanity check before making any network call.
    if not cleaned or len(cleaned) < 10:
        raise ValueError(f"Invalid {provider} API key: key is too short or empty")

    which = provider.lower()
    if which == "openai":
        return ChatOpenAI(
            model=model_name,
            temperature=0,
            openai_api_key=cleaned,
            streaming=True,
        )
    if which == "claude":
        if ChatAnthropic is None:
            raise ImportError("langchain-anthropic not installed. Run: pip install langchain-anthropic")
        return ChatAnthropic(
            model=model_name,
            temperature=0,
            anthropic_api_key=cleaned,
            streaming=True,
        )
    if which == "gemini":
        if ChatGoogleGenerativeAI is None:
            raise ImportError("langchain-google-genai not installed. Run: pip install langchain-google-genai")
        return ChatGoogleGenerativeAI(
            model=model_name,
            temperature=0,
            google_api_key=cleaned,
            streaming=True,
        )
    raise ValueError(f"Unknown provider: {provider}")
def create_agent_executor(mcp_server_url: str, provider: str, api_key: str, model_name: str):
    """Build a LangGraph ReAct agent wired to the MCP server's tools.

    Args:
        mcp_server_url: Base URL of the MCP HTTP server.
        provider: Internal provider id ("openai", "claude", "gemini").
        api_key: Provider API key (validated/cleaned inside create_llm).
        model_name: Model identifier for the provider.

    Returns:
        The agent, or None on failure — errors are rendered in the
        Streamlit UI rather than raised, and callers treat None as
        "not ready".
    """
    try:
        llm = create_llm(provider, api_key, model_name)
        tools = get_mcp_tools(mcp_server_url)
        # create_react_agent builds a tool-calling ReAct loop directly from
        # the LLM and tool list; no prompt template is required.
        #
        # Fix: the previous version had a bare `except:` fallback that called
        # `create_agent`, a name never imported or defined, so it could only
        # raise NameError. The dead fallback is removed; any failure here is
        # reported by the handler below exactly as before.
        return create_react_agent(llm, tools)
    except Exception as e:
        st.error(f"Error creating agent: {e}")
        import traceback
        st.error(traceback.format_exc())
        return None
def main():
    """Render the app: sidebar configuration, agent initialization, chat loop.

    Flow: seed session state -> sidebar (server URL, provider/model/key
    selection, connection and key tests) -> lazily (re)build the agent when
    the provider/key/model combination changes -> replay the transcript ->
    handle one chat turn, streaming tool calls/results into expanders.

    NOTE(review): several emoji string literals in this function appear
    mojibake'd (e.g. "β", "π", "β οΈ") — confirm the intended UTF-8 emojis.
    """
    initialize_session_state()

    # Header banners styled by the CSS injected at module import.
    st.markdown('<div class="main-header">π 42crunch MCP Assistant</div>', unsafe_allow_html=True)
    st.markdown('<div class="sub-header">AI-powered assistant for managing 42crunch API collections</div>', unsafe_allow_html=True)

    # Sidebar configuration
    with st.sidebar:
        st.header("βοΈ Configuration")

        # MCP Server URL (env var MCP_SERVER_URL provides the default).
        mcp_server_url = st.text_input(
            "MCP Server URL",
            value=os.getenv("MCP_SERVER_URL", "http://localhost:8000"),
            help="URL of the MCP HTTP server"
        )

        # Provider selection; LLM_PROVIDER env var picks the default entry.
        providers = ["OpenAI", "Claude (Anthropic)", "Gemini (Google)"]
        default_provider = os.getenv("LLM_PROVIDER", "OpenAI")
        selected_provider = st.selectbox(
            "AI Provider",
            options=providers,
            index=providers.index(default_provider) if default_provider in providers else 0,
            help="Select the AI provider to use"
        )

        # Map provider display name to the internal id used everywhere else.
        provider_map = {
            "OpenAI": "openai",
            "Claude (Anthropic)": "claude",
            "Gemini (Google)": "gemini"
        }
        provider_internal = provider_map[selected_provider]

        # Model selection based on provider.
        model_options = {
            "openai": ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo"],
            "claude": ["claude-3-5-sonnet-20241022", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"],
            "gemini": ["gemini-pro", "gemini-1.5-pro", "gemini-1.5-flash"]
        }
        available_models = model_options.get(provider_internal, model_options["openai"])
        # Per-provider default model override: OPENAI_MODEL / CLAUDE_MODEL / GEMINI_MODEL.
        default_model_env = os.getenv(f"{provider_internal.upper()}_MODEL", available_models[0])
        default_model = default_model_env if default_model_env in available_models else available_models[0]
        selected_model = st.selectbox(
            f"{selected_provider} Model",
            options=available_models,
            index=available_models.index(default_model) if default_model in available_models else 0,
            help=f"Select the {selected_provider} model to use"
        )

        # API key input, pre-filled from env/.env/~/.ai_tokens via get_api_key.
        api_key_env_vars = {
            "openai": "OPENAI_API_KEY",
            "claude": "ANTHROPIC_API_KEY",
            "gemini": "GOOGLE_API_KEY"
        }
        env_var = api_key_env_vars[provider_internal]
        default_key = get_api_key(provider_internal)
        api_key = st.text_input(
            f"{selected_provider} API Key",
            value=default_key,
            type="password",
            help=f"Your {selected_provider} API key (or set in ui/.env, ~/.ai_tokens, or {env_var} env var)"
        )

        # Show key source and a heuristic format validation (prefix checks only).
        if default_key:
            # Masked preview so users can verify which key was picked up.
            key_preview = f"{default_key[:7]}...{default_key[-4:]}" if len(default_key) > 11 else "***"
            st.info(f"β API key loaded: {key_preview} (from .env, ~/.ai_tokens, or environment)")
            if provider_internal == "openai" and not default_key.startswith(('sk-', 'sk-proj-')):
                st.warning("β οΈ OpenAI API key format looks unusual. Should start with 'sk-' or 'sk-proj-'")
            elif provider_internal == "claude" and not default_key.startswith('sk-ant-'):
                st.warning("β οΈ Claude API key format looks unusual. Should start with 'sk-ant-'")
            elif provider_internal == "gemini" and len(default_key) < 20:
                st.warning("β οΈ Gemini API key format looks unusual.")
        else:
            st.warning(f"β οΈ No {selected_provider} API key found. Please enter one or set in ui/.env, ~/.ai_tokens, or {env_var} environment variable")

        # Manual health check against the MCP server's /health endpoint.
        if st.button("π Check Server Connection"):
            import requests
            try:
                response = requests.get(f"{mcp_server_url}/health", timeout=5)
                if response.status_code == 200:
                    st.success("β Server is connected!")
                else:
                    st.error("β Server returned error")
            except Exception as e:
                st.error(f"β Cannot connect to server: {e}")

        # Live API-key test: makes one cheap authenticated call per provider.
        if st.button(f"π Test {selected_provider} API Key"):
            if api_key:
                try:
                    if provider_internal == "openai":
                        from openai import OpenAI
                        client = OpenAI(api_key=api_key.strip())
                        client.models.list()
                        st.success("β OpenAI API key is valid!")
                    elif provider_internal == "claude":
                        if ChatAnthropic is None:
                            st.error("β langchain-anthropic not installed")
                        else:
                            test_llm = ChatAnthropic(model="claude-3-haiku-20240307", anthropic_api_key=api_key.strip())
                            test_llm.invoke("test")
                            st.success("β Claude API key is valid!")
                    elif provider_internal == "gemini":
                        if ChatGoogleGenerativeAI is None:
                            st.error("β langchain-google-genai not installed")
                        else:
                            test_llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=api_key.strip())
                            test_llm.invoke("test")
                            st.success("β Gemini API key is valid!")
                except Exception as e:
                    # String-match on the error text to distinguish auth failures.
                    error_msg = str(e)
                    if "401" in error_msg or "invalid" in error_msg.lower() or "authentication" in error_msg.lower():
                        st.error("β **Invalid API Key**")
                        st.info(f"""
The {selected_provider} API key appears to be invalid. Please:
1. Check your key in the provider's dashboard
2. Make sure the key hasn't expired
3. Verify the key in `ui/.env` is correct
4. Try generating a new key if needed
""")
                    else:
                        st.error(f"β Error testing key: {error_msg}")
            else:
                st.warning("β οΈ No API key to test")

        st.divider()

        # Static list of the MCP tools exposed by the server.
        st.header("π οΈ Available Tools")
        st.markdown("""
- **list_collections**: List all collections
- **get_collection_apis**: Get APIs in a collection
- **get_api_details**: Get detailed API information
""")

        # Clear conversation: drops transcript, history, and the cached agent.
        if st.button("ποΈ Clear Conversation"):
            st.session_state.messages = []
            st.session_state.chat_history = []
            st.session_state.agent = None
            st.rerun()

    # Main chat interface — bail out early when no key is available.
    if not api_key:
        st.warning(f"β οΈ Please enter your {selected_provider} API key in the sidebar to start chatting.")
        st.info(f"π‘ You can also set it as an environment variable: `{env_var}`")
        return

    # (Re)initialize the agent when the provider/key/model combination changes.
    # NOTE(review): agent_key embeds the raw API key in session state —
    # consider hashing it instead.
    agent_key = f"{provider_internal}_{api_key}_{selected_model}"
    if st.session_state.agent is None or st.session_state.get("agent_key") != agent_key:
        with st.spinner(f"π€ Initializing {selected_provider} agent with {selected_model}..."):
            agent = create_agent_executor(mcp_server_url, provider_internal, api_key, selected_model)
            if agent:
                st.session_state.agent = agent
                st.session_state.agent_key = agent_key
                st.success(f"β Agent initialized with {selected_provider} ({selected_model})!")
            else:
                st.error("β Failed to initialize agent. Check your configuration.")
                return

    # Replay the stored transcript on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Handle one chat turn.
    if prompt := st.chat_input("Ask me about 42crunch collections, APIs, or details..."):
        # Record and render the user message.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        # Get agent response.
        with st.chat_message("assistant"):
            with st.spinner("π€ Thinking..."):
                try:
                    # LangGraph agents take a {"messages": [...]} payload;
                    # rebuild it from the stored chat history each turn.
                    from langchain_core.messages import HumanMessage, AIMessage
                    messages = []
                    for msg in st.session_state.chat_history[-10:]:  # Keep last 10 messages for context
                        if msg["role"] == "user":
                            messages.append(HumanMessage(content=msg["content"]))
                        elif msg["role"] == "assistant":
                            messages.append(AIMessage(content=msg["content"]))
                    # Add the current user message.
                    messages.append(HumanMessage(content=prompt))

                    result = st.session_state.agent.invoke({"messages": messages})

                    # Extract response and tool calls from the LangGraph result.
                    if "messages" in result:
                        all_messages = result["messages"]
                        from langchain_core.messages import ToolMessage, AIMessage
                        response_parts = []
                        for msg in all_messages:
                            # Show each tool invocation the agent made.
                            if isinstance(msg, AIMessage):
                                # NOTE(review): both getattr calls are identical,
                                # so the `or` fallback is redundant.
                                tool_calls = getattr(msg, 'tool_calls', None) or getattr(msg, 'tool_calls', [])
                                if tool_calls:
                                    for tool_call in tool_calls:
                                        # Tool calls may be dicts or objects depending on provider.
                                        if isinstance(tool_call, dict):
                                            tool_name = tool_call.get('name', tool_call.get('tool', 'unknown'))
                                            tool_args = tool_call.get('args', tool_call.get('tool_input', {}))
                                        else:
                                            tool_name = getattr(tool_call, 'name', getattr(tool_call, 'tool', 'unknown'))
                                            tool_args = getattr(tool_call, 'args', getattr(tool_call, 'tool_input', {}))
                                        with st.expander(f"π§ Using tool: **{tool_name}**", expanded=True):
                                            st.json(tool_args if tool_args else {})
                            # Show each tool result; long results go in a text area.
                            if isinstance(msg, ToolMessage):
                                tool_name = getattr(msg, 'name', 'tool')
                                content = msg.content
                                with st.expander(f"β Tool result: **{tool_name}**", expanded=True):
                                    if isinstance(content, str) and len(content) > 500:
                                        st.text_area("Result", content, height=200, disabled=True, key=f"tool_result_{id(msg)}")
                                    else:
                                        st.markdown(f"```\n{content}\n```")
                            # Collect the final AI text (AIMessages with no tool calls).
                            if isinstance(msg, AIMessage) and not (hasattr(msg, 'tool_calls') and msg.tool_calls):
                                if hasattr(msg, 'content'):
                                    content = msg.content
                                    # Content may be a string, a list of text parts, or other.
                                    if isinstance(content, list):
                                        text_parts = []
                                        for item in content:
                                            if isinstance(item, dict):
                                                if 'text' in item:
                                                    text_parts.append(item['text'])
                                                elif 'type' in item and item.get('type') == 'text' and 'text' in item:
                                                    text_parts.append(item['text'])
                                            elif isinstance(item, str):
                                                text_parts.append(item)
                                        response_parts.append(' '.join(text_parts) if text_parts else str(content))
                                    elif isinstance(content, str):
                                        response_parts.append(content)
                                    else:
                                        response_parts.append(str(content))
                        # Assemble the final response; fall back to the last message.
                        response = '\n\n'.join(response_parts) if response_parts else str(result)
                        if not response_parts:
                            if all_messages:
                                last_msg = all_messages[-1]
                                if hasattr(last_msg, 'content'):
                                    response = str(last_msg.content)
                                else:
                                    response = str(last_msg)
                            else:
                                response = str(result)
                    elif "output" in result:
                        # Legacy AgentExecutor-style result shape.
                        output = result["output"]
                        if isinstance(output, list):
                            text_parts = []
                            for item in output:
                                if isinstance(item, dict) and 'text' in item:
                                    text_parts.append(item['text'])
                                elif isinstance(item, str):
                                    text_parts.append(item)
                            response = ' '.join(text_parts) if text_parts else str(output)
                        else:
                            response = str(output)
                    else:
                        response = str(result)

                    st.markdown(response)
                    st.session_state.messages.append({"role": "assistant", "content": response})
                    # Keep the agent-facing history in sync with the transcript.
                    st.session_state.chat_history.append({"role": "user", "content": prompt})
                    st.session_state.chat_history.append({"role": "assistant", "content": response})
                except Exception as e:
                    # Classify common provider failures by matching the error text.
                    error_msg = str(e)
                    error_str_lower = error_msg.lower()
                    if any(x in error_str_lower for x in ["401", "invalid_api_key", "authentication", "incorrect api key"]):
                        st.error("π **Authentication Error**: Invalid OpenAI API key")
                        st.info("""
**Please check:**
1. Your API key in `ui/.env` file is correct and complete
2. The key hasn't expired or been revoked
3. There are no extra spaces, quotes, or newlines in the key
4. You can verify your key at: https://platform.openai.com/account/api-keys
**Current key preview:** Check the sidebar to see the loaded key format.
""")
                        error_msg = "Authentication failed: Invalid API key. Please verify your API key in ui/.env or the sidebar."
                    elif any(x in error_str_lower for x in ["404", "model_not_found", "does not exist", "do not have access"]):
                        st.error("π€ **Model Access Error**: You don't have access to this model")
                        st.info(f"""
**The selected {selected_provider} model is not available. Please:**
1. Try a different model from the dropdown
2. Check your {selected_provider} account tier and model access
3. Some models require specific API access levels
**Recommended models:**
- OpenAI: `gpt-3.5-turbo` or `gpt-4o-mini`
- Claude: `claude-3-haiku-20240307` or `claude-3-sonnet-20240229`
- Gemini: `gemini-pro` or `gemini-1.5-flash`
""")
                        error_msg = f"Model access error: {selected_model} is not available. Try selecting a different model in the sidebar."
                    else:
                        st.error(f"β Error: {error_msg}")
                        with st.expander("π Show detailed error"):
                            import traceback
                            st.code(traceback.format_exc())
                    # The (possibly rewritten) error message becomes the assistant turn.
                    st.session_state.messages.append({"role": "assistant", "content": error_msg})
# Entry point when executed directly (e.g. `streamlit run` this file).
if __name__ == "__main__":
    main()