
Model Control Plane (MCP) Server

docker-compose.yml
```yaml
services:
  prometheus:
    build:
      context: .
      dockerfile: Dockerfile.prometheus
    container_name: prometheus
    ports:
      - "9090:9090"
    volumes:
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      - '--web.enable-lifecycle'
    restart: unless-stopped
    networks:
      - monitoring
    # No user setting - let Dockerfile handle users and permissions
    extra_hosts:
      - "host.docker.internal:host-gateway"

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    ports:
      - "9100:9100"
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
      - '--collector.meminfo'
    restart: unless-stopped
    networks:
      - monitoring

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    ports:
      - "8080:8080"
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    restart: unless-stopped
    networks:
      - monitoring

  # New Langflow service
  langflow:
    build:
      context: .
      dockerfile: Dockerfile.langflow
    container_name: langflow
    network_mode: host
    environment:
      - LANGFLOW_COMPONENTS_PATH=/home/user/app/src/components/custom/tools
      - LANGFLOW_LOG_LEVEL=DEBUG
      - LANGFLOW_AUTO_RELOAD=true
      - DOCKER_HOST_IP=172.17.0.1
    extra_hosts:
      - "host.docker.internal:host-gateway"
    command: langflow run --host 0.0.0.0 --port 7860 --dev
    restart: unless-stopped

  # Memory stress container for testing alerts
  memory-stress:
    build:
      context: .
      dockerfile: Dockerfile.memory-stress
    container_name: memory-stress
    mem_limit: 256m
    restart: "no"  # Don't restart this container automatically
    depends_on:
      - prometheus
      - cadvisor
    networks:
      - monitoring

  # Grafana for visualization
  grafana:
    build:
      context: .
      dockerfile: Dockerfile.grafana
    container_name: grafana
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_INSTALL_PLUGINS=grafana-piechart-panel
    volumes:
      - grafana-data:/var/lib/grafana
    restart: unless-stopped
    depends_on:
      - prometheus
    networks:
      - monitoring

  # MCP-Grafana Bridge service using a simple Python container
  mcp-grafana:
    image: python:3.9-slim
    container_name: mcp-grafana
    ports:
      - "8085:8085"
    volumes:
      - ./mcp_grafana_bridge:/app
    working_dir: /app
    command: ./startup.sh
    environment:
      - GRAFANA_URL=http://grafana:3000
      - PROMETHEUS_URL=http://prometheus:9090
      - MCP_PORT=8085
      - GRAFANA_API_KEY=${GRAFANA_API_KEY:-}
      - GRAFANA_USERNAME=${GRAFANA_USERNAME:-admin}
      - GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}
    depends_on:
      - grafana
      - prometheus
    restart: unless-stopped
    networks:
      - monitoring

  # Kubernetes Performance Anomaly Detection Service
  k8s-anomaly-detection:
    image: python:3.9-slim
    container_name: k8s-anomaly-detection
    ports:
      - "8086:8086"
    volumes:
      - ./mcp_grafana_bridge:/app
      - ./scripts:/app/scripts
    working_dir: /app
    command: >
      bash -c "pip install flask requests numpy &&
               python mcp_anomaly_detection.py"
    environment:
      - PROMETHEUS_URL=http://prometheus:9090
      - K8S_SCRIPT_PATH=/app/scripts/kubernetes_performance_anomalies.py
      - ANOMALY_DETECTION_INTERVAL=300
      - ANOMALY_API_PORT=8086
      - DETECTION_ENABLED=true
    depends_on:
      - prometheus
      - cadvisor
      - node-exporter
    restart: unless-stopped
    networks:
      - monitoring

  # Kubernetes Metrics Generator
  k8s-metrics-generator:
    image: python:3.9-slim
    container_name: k8s-metrics-generator
    ports:
      - "9093:9093"
    volumes:
      - ./scripts:/app/scripts
    working_dir: /app
    command: >
      bash -c "pip install numpy &&
               python scripts/k8s_dummy_data_generator.py --pods 35 --http-port 9093 --interval 2"
    restart: unless-stopped
    networks:
      - monitoring
    extra_hosts:
      - "host.docker.internal:host-gateway"

volumes:
  grafana-data: {}
  prometheus_data: {}

networks:
  monitoring:
    driver: bridge
```
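Once the stack is running (`docker compose up -d`), one quick way to confirm that Prometheus is reachable and scraping its targets is to query its standard HTTP API (`/api/v1/query`) for the built-in `up` metric. The sketch below is illustrative and assumes only the host port mapping from the compose file above (`9090:9090`); the helper name is not part of this project.

```python
# Smoke test for the monitoring stack: queries Prometheus's HTTP API for the
# `up` metric, which is 1 for every scrape target Prometheus can reach.
# Assumes Prometheus is published on localhost:9090 as in the compose file.
import json
import urllib.request

PROMETHEUS = "http://localhost:9090"

def scrape_targets_up(base_url: str = PROMETHEUS) -> dict:
    """Return {instance: 1.0 or 0.0} for every configured scrape target."""
    with urllib.request.urlopen(f"{base_url}/api/v1/query?query=up", timeout=5) as resp:
        payload = json.load(resp)
    return {
        sample["metric"].get("instance", "unknown"): float(sample["value"][1])
        for sample in payload["data"]["result"]
    }

if __name__ == "__main__":
    for instance, value in scrape_targets_up().items():
        print(f"{instance:40s} {'UP' if value == 1.0 else 'DOWN'}")
```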

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/dvladimirov/MCP'
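The same lookup can be scripted. Below is a minimal Python equivalent of the curl call above; only the URL is taken from this page, and no particular response schema is assumed beyond it being JSON.

```python
# Fetch this server's directory entry from the Glama MCP API and pretty-print
# the JSON response. Equivalent to the curl command shown above.
import json
import urllib.request

URL = "https://glama.ai/api/mcp/v1/servers/dvladimirov/MCP"

with urllib.request.urlopen(URL, timeout=10) as resp:
    server_info = json.load(resp)

print(json.dumps(server_info, indent=2))
```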

If you have feedback or need assistance with the MCP directory API, please join our Discord server.