#!/usr/bin/env python3
"""
Setup script for Llama 4 Maverick MCP Server
Author: Yobie Benjamin
Version: 0.9
Date: August 1, 2025
This script sets up the Python MCP server environment.
"""
import shutil
import subprocess
import sys
from pathlib import Path


def check_python_version():
    """Check if Python version is 3.9 or higher."""
    if sys.version_info < (3, 9):
        print("❌ Python 3.9 or higher is required")
        print(f"   Current version: {sys.version}")
        sys.exit(1)
    print(f"✅ Python version: {sys.version.split()[0]}")


def check_ollama():
    """Check if Ollama is installed and running."""
    try:
        result = subprocess.run(
            ["ollama", "--version"],
            capture_output=True,
            text=True,
            check=False,
        )
        if result.returncode == 0:
            print("✅ Ollama is installed")
            # Check if the Ollama service is running
            try:
                import httpx

                response = httpx.get("http://localhost:11434/api/tags", timeout=2.0)
                if response.status_code == 200:
                    print("✅ Ollama service is running")
                else:
                    print("⚠️ Ollama service is not responding")
                    print("   Start it with: ollama serve")
            except Exception:
                print("⚠️ Ollama service is not running")
                print("   Start it with: ollama serve")
        else:
            raise FileNotFoundError
    except FileNotFoundError:
        print("⚠️ Ollama is not installed")
        print("   Install from: https://ollama.com")
        print("   The server will run in limited mode without Ollama")


def create_env_file():
    """Create .env file if it doesn't exist."""
    env_file = Path(".env")
    env_example = Path(".env.example")
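    # A shipped .env.example takes precedence as the template; the inline
    # defaults below are only a fallback when no example file is present.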
    if not env_file.exists():
        if env_example.exists():
            shutil.copy(env_example, env_file)
            print("✅ Created .env file from .env.example")
        else:
            # Create a basic .env file
            env_content = """# Llama 4 Maverick MCP Configuration
LLAMA_API_URL=http://localhost:11434
LLAMA_MODEL_NAME=llama3:latest
MCP_LOG_LEVEL=INFO
ENABLE_CODE_EXECUTION=false
ENABLE_WEB_SEARCH=true
FILE_SYSTEM_BASE_PATH=.
"""
            env_file.write_text(env_content)
            print("✅ Created default .env file")
    else:
        print("✅ .env file already exists")


def install_dependencies():
    """Install Python dependencies."""
    print("\n📦 Installing dependencies...")
    # Check if in virtual environment
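    # Inside a venv, sys.prefix points at the venv directory while
    # sys.base_prefix points at the base interpreter, so equality means
    # we are running on the system Python.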
    if sys.prefix == sys.base_prefix:
        print("⚠️ Not in a virtual environment")
        print("   Consider creating one: python -m venv venv")
        response = input("   Continue anyway? (y/n): ")
        if response.lower() != 'y':
            sys.exit(0)
    # Install dependencies
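    # `pip install -e .` performs an editable install: the package stays
    # importable from this working tree, and its dependencies are resolved
    # from the project's packaging metadata.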
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "-e", "."],
        check=True,
    )
    print("✅ Dependencies installed")


def pull_model():
    """Pull the default Llama model."""
    print("\n📥 Checking Llama model...")
    try:
        # Check if model exists
        result = subprocess.run(
            ["ollama", "list"],
            capture_output=True,
            text=True,
            check=False,
        )
        if result.returncode == 0:
            # Check if llama3:latest is in the list
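            # A substring match against `ollama list` output is a simple
            # heuristic; it assumes the tag appears verbatim in stdout.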
if "llama3:latest" in result.stdout:
print("✅ Model llama3:latest is available")
else:
print("📥 Pulling llama3:latest model...")
print(" This may take several minutes...")
subprocess.run(["ollama", "pull", "llama3:latest"], check=False)
except:
print("⚠️ Cannot check/pull model (Ollama not available)")


def main():
    """Main setup function."""
    print("🦙 Llama 4 Maverick MCP Server (Python) Setup")
    print("=" * 50)
    # Run checks
    check_python_version()
    check_ollama()
    # Set up the environment
    create_env_file()
    install_dependencies()
    pull_model()
    print("\n✅ Setup complete!")
    print("\nNext steps:")
    print("1. Edit the .env file with your configuration")
    print("2. Start Ollama: ollama serve")
    print("3. Run the server: python -m llama4_maverick_mcp.server")
    print("4. Or install and run: llama4-mcp")
    print("\nFor development:")
    print("   pip install -e .[dev]")
    print("   pytest")


if __name__ == "__main__":
    main()