# docker-compose.production.yml
version: '3.8'  # NOTE: 'version' is obsolete in Compose v2 (ignored); kept for older docker-compose clients

services:
  # Production MCP application server
  mcp-server:
    build:
      context: .
      dockerfile: Dockerfile.production
    container_name: mcp-production-server
    restart: unless-stopped
    ports:
      - "8000:8000"
    environment:
      - ENVIRONMENT=production
      # FIX: the mongodb service enables root auth via MONGO_INITDB_ROOT_*,
      # so an unauthenticated URI would fail. Credentials + authSource added.
      # NOTE(review): if MONGODB_ROOT_PASSWORD contains URL-special characters
      # it must be percent-encoded — confirm against the deployment secrets.
      - MONGODB_CONNECTION_STRING=mongodb://admin:${MONGODB_ROOT_PASSWORD:-admin123}@mongodb:27017/mcp_production?authSource=admin
      - OPENWEATHER_API_KEY=${OPENWEATHER_API_KEY}
      - LOG_LEVEL=INFO
    volumes:
      - ./agents:/app/agents
      - ./config:/app/config
      - ./logs:/app/logs
      - ./.env:/app/.env
    depends_on:
      # Gate on health, not mere container start, so the app does not
      # crash-loop while MongoDB is still initializing (both services
      # define healthchecks below).
      mongodb:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - mcp-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    deploy:
      resources:
        limits:
          memory: 2G
          cpus: '1.0'
        reservations:
          memory: 1G
          cpus: '0.5'
# MongoDB Database
mongodb:
image: mongo:7.0
container_name: mcp-mongodb
restart: unless-stopped
ports:
- "27017:27017"
environment:
- MONGO_INITDB_ROOT_USERNAME=admin
- MONGO_INITDB_ROOT_PASSWORD=${MONGODB_ROOT_PASSWORD:-admin123}
- MONGO_INITDB_DATABASE=mcp_production
volumes:
- mongodb_data:/data/db
- mongodb_config:/data/configdb
- ./scripts/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
networks:
- mcp-network
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
deploy:
resources:
limits:
memory: 1G
cpus: '0.5'
# Redis Cache
redis:
image: redis:7.2-alpine
container_name: mcp-redis
restart: unless-stopped
ports:
- "6379:6379"
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
volumes:
- redis_data:/data
networks:
- mcp-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
deploy:
resources:
limits:
memory: 512M
cpus: '0.25'
# Nginx Load Balancer (for multiple MCP instances)
nginx:
image: nginx:alpine
container_name: mcp-nginx
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./config/nginx.conf:/etc/nginx/nginx.conf:ro
- ./ssl:/etc/nginx/ssl:ro
depends_on:
- mcp-server
networks:
- mcp-network
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
interval: 30s
timeout: 10s
retries: 3
# Monitoring with Prometheus
prometheus:
image: prom/prometheus:latest
container_name: mcp-prometheus
restart: unless-stopped
ports:
- "9090:9090"
volumes:
- ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
networks:
- mcp-network
# Grafana Dashboard
grafana:
image: grafana/grafana:latest
container_name: mcp-grafana
restart: unless-stopped
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin123}
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./config/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
- ./config/grafana/datasources:/etc/grafana/provisioning/datasources:ro
depends_on:
- prometheus
networks:
- mcp-network
# Log Aggregation with ELK Stack (Optional)
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
container_name: mcp-elasticsearch
restart: unless-stopped
environment:
- discovery.type=single-node
- xpack.security.enabled=false
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
ports:
- "9200:9200"
networks:
- mcp-network
profiles:
- logging
logstash:
image: docker.elastic.co/logstash/logstash:8.11.0
container_name: mcp-logstash
restart: unless-stopped
volumes:
- ./config/logstash/pipeline:/usr/share/logstash/pipeline:ro
- ./logs:/app/logs:ro
depends_on:
- elasticsearch
networks:
- mcp-network
profiles:
- logging
kibana:
image: docker.elastic.co/kibana/kibana:8.11.0
container_name: mcp-kibana
restart: unless-stopped
ports:
- "5601:5601"
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
depends_on:
- elasticsearch
networks:
- mcp-network
profiles:
- logging
# Backup Service
backup:
image: alpine:latest
container_name: mcp-backup
restart: "no"
volumes:
- mongodb_data:/backup/mongodb:ro
- ./agents:/backup/agents:ro
- ./logs:/backup/logs:ro
- ./backups:/backups
command: |
sh -c "
apk add --no-cache mongodb-tools curl &&
while true; do
echo 'Starting backup at $(date)'
mongodump --host mongodb:27017 --out /backups/mongodb-$(date +%Y%m%d_%H%M%S)
tar -czf /backups/agents-$(date +%Y%m%d_%H%M%S).tar.gz -C /backup agents
find /backups -name '*.tar.gz' -mtime +30 -delete
find /backups -name 'mongodb-*' -mtime +30 -exec rm -rf {} +
echo 'Backup completed at $(date)'
sleep 86400
done
"
depends_on:
- mongodb
networks:
- mcp-network
profiles:
- backup
# Networks
networks:
  mcp-network:
    driver: bridge
    ipam:
      config:
        # NOTE(review): fixed subnet can collide with existing host networks —
        # confirm 172.20.0.0/16 is free in the target environment.
        - subnet: 172.20.0.0/16
# Named volumes (local driver; data survives container recreation)
volumes:
  mongodb_data:
    driver: local
  mongodb_config:
    driver: local
  redis_data:
    driver: local
  prometheus_data:
    driver: local
  grafana_data:
    driver: local
  elasticsearch_data:
    driver: local
# Shared healthcheck defaults (Compose extension field).
# NOTE(review): this anchor is never referenced anywhere in the file. To use
# it, services would add '<<: *healthcheck-defaults' under 'healthcheck:',
# and since a YAML alias must appear AFTER its anchor in document order,
# this stanza would have to move above the services that reference it.
x-healthcheck-defaults: &healthcheck-defaults
  interval: 30s
  timeout: 10s
  retries: 3
  start_period: 40s