---
# Docker Compose stack for the Nanonets MCP server.
name: nanonets-mcp

services:
  nanonets-mcp:
    build: .
    image: nanonets-mcp:latest
    container_name: nanonets-mcp-server
    restart: unless-stopped
    ports:
      # Host 8005 -> container 8000. Quoted: unquoted HH:MM-like values can
      # hit YAML 1.1 sexagesimal parsing.
      - "8005:8000"
    environment:
      # Keep the Hugging Face cache inside the mounted volume (see volumes:)
      - HF_HOME=/app/.cache/huggingface
      # NOTE(review): "all" exposes every GPU, but deploy.devices below pins
      # device_ids 0 and 1 — confirm which is intended; with Compose v2 and
      # the NVIDIA runtime the device reservation takes precedence.
      - NVIDIA_VISIBLE_DEVICES=all
    volumes:
      # Persist model cache to avoid re-downloading
      - huggingface_cache:/app/.cache/huggingface
      # Optional: mount local images directory (read-only)
      - ./images:/app/images:ro
    healthcheck:
      # Import check only — proves the package loads inside the container,
      # not that the server is actually accepting requests.
      test: ["CMD", "python", "-c", "from nanonets_mcp.server import mcp; print('OK')"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Generous grace period: first boot may still be downloading weights.
      start_period: 120s
    # Modern GPU support for Docker Engine v28+ / Compose v2
    deploy:
      resources:
        limits:
          memory: 16G
        reservations:
          memory: 8G
          devices:
            - driver: nvidia
              device_ids: ["0", "1"]  # specify GPU IDs; quoted so they stay strings
              capabilities: [gpu]
volumes:
  # Named volume holding the Hugging Face model cache so weights survive
  # container rebuilds.
  huggingface_cache:
    driver: local
networks:
  # Give the project's default network a stable, descriptive name instead of
  # the auto-generated "<project>_default".
  default:
    name: nanonets-mcp-network