# docker-compose.yml
version: '3.8'

services:
  vllm-mcp:
    build: .
    container_name: vllm-mcp-server
    ports:
      - "8080:8080"
    environment:
      # OpenAI configuration
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - OPENAI_BASE_URL=${OPENAI_BASE_URL:-https://api.openai.com/v1}
      # DashScope configuration
      - DASHSCOPE_API_KEY=${DASHSCOPE_API_KEY}
      # Server configuration
      - VLLM_MCP_HOST=0.0.0.0
      - VLLM_MCP_PORT=8080
      - VLLM_MCP_TRANSPORT=http
      - VLLM_MCP_LOG_LEVEL=${LOG_LEVEL:-INFO}
      # Optional: custom config file (mounted read-only below)
      - VLLM_MCP_CONFIG_PATH=/app/config.json
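      # The ${...} references above are resolved from the shell environment or
      # an adjacent .env file. A minimal .env sketch (placeholder values):
      #
      #   OPENAI_API_KEY=sk-your-openai-key
      #   DASHSCOPE_API_KEY=sk-your-dashscope-key
      #   LOG_LEVEL=INFO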
    volumes:
      - ./config.json:/app/config.json:ro
      - ./logs:/app/logs
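    # Hypothetical shape for the mounted config.json, for illustration only;
    # the real schema is defined by the vllm-mcp server, so check its docs:
    #
    #   {
    #     "host": "0.0.0.0",
    #     "port": 8080,
    #     "transport": "http",
    #     "log_level": "INFO"
    #   }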
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
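      # The CMD test runs inside the container, so the image must ship curl.
      # To verify health from the host once the container is up:
      #
      #   curl -f http://localhost:8080/health
      #   docker inspect --format '{{.State.Health.Status}}' vllm-mcp-server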
    networks:
      - vllm-mcp-network
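    # Build and start just this service (nginx below sits behind a profile):
    #
    #   docker compose up -d --build vllm-mcp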

  # Optional: nginx reverse proxy for production (enabled via the
  # `production` profile below)
  nginx:
    image: nginx:alpine
    container_name: vllm-mcp-nginx
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
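    # Minimal nginx.conf sketch for the mount above. Assumes the cert and key
    # live at ./ssl/server.crt and ./ssl/server.key; adjust paths to match:
    #
    #   events {}
    #   http {
    #     server {
    #       listen 443 ssl;
    #       ssl_certificate     /etc/nginx/ssl/server.crt;
    #       ssl_certificate_key /etc/nginx/ssl/server.key;
    #       location / {
    #         proxy_pass http://vllm-mcp:8080;
    #         proxy_set_header Host $host;
    #       }
    #     }
    #   }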
    depends_on:
      - vllm-mcp
    restart: unless-stopped
    networks:
      - vllm-mcp-network
    profiles:
      - production
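    # The `production` profile keeps nginx out of a plain `docker compose up`.
    # To start the full stack, proxy included:
    #
    #   docker compose --profile production up -d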

networks:
  vllm-mcp-network:
    driver: bridge

volumes:
  # Declared but unused above; swap the ./logs bind mount for `logs:/app/logs`
  # if you prefer a Docker-managed volume for logs.
  logs:
    driver: local