
MCP AI Service Platform

by dkb12138ggg
docker-compose.yml (4.5 kB)
version: '3.8'

services:
  # MCP service
  mcp-client:
    build:
      context: .
      target: production
    ports:
      - "8000:8000"  # API port
      - "8001:8001"  # Prometheus metrics port
    environment:
      # API configuration
      - API_HOST=0.0.0.0
      - API_PORT=8000
      - API_WORKERS=1
      - API_MAX_CONCURRENT_REQUESTS=100
      - API_REQUEST_TIMEOUT=300
      - API_RATE_LIMIT_PER_MINUTE=60
      # OpenAI configuration
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - OPENAI_BASE_URL=${OPENAI_BASE_URL:-https://api.openai.com/v1}
      - OPENAI_MODEL=${OPENAI_MODEL:-gpt-4}
      - OPENAI_MAX_TOKENS=1000
      - OPENAI_TIMEOUT=30
      # MCP connection configuration
      - MCP_SERVER_URL=mcp.json
      - MCP_MAX_CONNECTIONS_PER_SERVER=5
      - MCP_CONNECTION_TIMEOUT=30
      - MCP_RECONNECT_INTERVAL=5
      - MCP_MAX_RECONNECT_ATTEMPTS=3
      - MCP_HEALTH_CHECK_INTERVAL=60
      # Redis configuration
      - REDIS_HOST=redis
      - REDIS_PORT=6379
      - REDIS_DB=0
      - REDIS_MAX_CONNECTIONS=20
      # PostgreSQL configuration
      - POSTGRES_HOST=postgres
      - POSTGRES_PORT=5432
      - POSTGRES_DB=mcp_rag
      - POSTGRES_USER=mcp_user
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-mcp_password123}
      - POSTGRES_MAX_CONNECTIONS=20
      # Logging configuration
      - LOG_LEVEL=INFO
      - LOG_FORMAT=json
      # Monitoring configuration
      - MONITORING_ENABLE_PROMETHEUS=true
      - MONITORING_PROMETHEUS_PORT=8001
      - MONITORING_ENABLE_HEALTH_CHECK=true
      # Environment settings
      - ENVIRONMENT=production
      - DEBUG=false
    volumes:
      - ./logs:/app/logs
      - ./mcp.json:/app/mcp.json:ro
    depends_on:
      - redis
      - postgres
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
        reservations:
          memory: 512M
          cpus: '0.5'

  # Redis cache
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
      - ./deployments/redis.conf:/usr/local/etc/redis/redis.conf:ro
    command: redis-server /usr/local/etc/redis/redis.conf
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 3s
      retries: 3
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: '0.5'

  # Prometheus monitoring
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    depends_on:
      - mcp-client

  # Grafana dashboards
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning:ro
      - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin123}
      - GF_USERS_ALLOW_SIGN_UP=false
    restart: unless-stopped
    depends_on:
      - prometheus

  # PostgreSQL database
  postgres:
    image: pgvector/pgvector:pg16
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_DB=mcp_rag
      - POSTGRES_USER=mcp_user
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-mcp_password123}
      - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./deployments/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U mcp_user -d mcp_rag"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '1.0'

volumes:
  redis_data:
  prometheus_data:
  grafana_data:
  postgres_data:

networks:
  default:
    driver: bridge
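The Compose file pulls several values from the shell environment or a local .env file. Below is a minimal sketch of bringing the stack up: the variable names come from the file above, the values are placeholders, and the verification URLs assume the ports and /health endpoint declared for the mcp-client service (the /metrics path on port 8001 is the conventional Prometheus path, not confirmed by the file itself).

# .env (placeholder values; OPENAI_BASE_URL and OPENAI_MODEL may be omitted to use the defaults)
OPENAI_API_KEY=sk-your-key-here
OPENAI_BASE_URL=https://api.openai.com/v1
OPENAI_MODEL=gpt-4
POSTGRES_PASSWORD=choose-a-strong-password
GRAFANA_PASSWORD=choose-a-strong-password

# Build and start the stack, then verify the API and metrics endpoints
docker compose up -d --build
curl -f http://localhost:8000/health          # mcp-client health check
curl -s http://localhost:8001/metrics | head  # Prometheus metrics (assumed path)

Once the containers are healthy, Prometheus is reachable on port 9090 and Grafana on port 3000 with the admin credentials configured above.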

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/dkb12138ggg/python-rag-mcp-client'
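The response is machine-readable; assuming it is JSON and jq is installed, it can be pretty-printed for inspection:

curl -s 'https://glama.ai/api/mcp/v1/servers/dkb12138ggg/python-rag-mcp-client' | jq .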

If you have feedback or need assistance with the MCP directory API, please join our Discord server.