services:
neo4j:
image: neo4j:5.15-community
container_name: neo4j_db
ports:
- "7474:7474" # HTTP Browser UI
- "7687:7687" # Bolt protocol
volumes:
- ./data/neo4j:/data
- ./logs/neo4j:/logs
- ./data/neo4j/import:/var/lib/neo4j/import
environment:
- NEO4J_AUTH=neo4j/${NEO4J_PASSWORD:-password}
- NEO4J_dbms_memory_pagecache_size=512M
- NEO4J_dbms_memory_heap_initial__size=512M
- NEO4J_dbms_memory_heap_max__size=2G
- NEO4J_PLUGINS=["apoc"]
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "cypher-shell -u neo4j -p $${NEO4J_PASSWORD:-password} 'RETURN 1' || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
- mcp_network
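# Quick verification once the container reports healthy (a sketch; substitute your
# actual password). Both ports above are published to the host:
#   docker compose exec neo4j cypher-shell -u neo4j -p '<NEO4J_PASSWORD>' 'RETURN 1'
#   browse to http://localhost:7474 for the Neo4j Browser UI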
copilot-api:
image: timothyswt/copilot-api-arm64:latest
container_name: copilot_api_server
ports:
- "4141:4141" # Fixed: copilot-api listens on 4141, not 3000
volumes:
- ./copilot-data:/root/.local/share/copilot-api # Persist GitHub token
environment:
- NODE_ENV=production
# No PORT override needed; the app listens on 4141 by default
restart: unless-stopped
healthcheck:
# Use CMD-SHELL so shell operators (||) work and allow a proper HTTP probe
test: ["CMD-SHELL", "wget --spider -q http://localhost:4141/ || exit 1"]
interval: 30s
timeout: 10s
retries: 5
start_period: 15s
networks:
- mcp_network
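# mimir-server (below) reaches this service inside the compose network at
# http://copilot-api:4141 via the OpenAI-compatible paths it configures
# (MIMIR_LLM_API_PATH / MIMIR_LLM_API_MODELS_PATH). A host-side smoke test
# sketch against the published port:
#   curl http://localhost:4141/v1/models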
llama-server:
image: timothyswt/llama-cpp-server-arm64-mxbai:latest
container_name: llama_server
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- mcp_network
# Uncomment if you have GPU support (NVIDIA)
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
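# No host port is published, so the embeddings endpoint is only reachable inside
# the compose network. A probe sketch run from inside the container (curl is
# available there, as the healthcheck shows); the model name and /v1/embeddings
# path mirror the MIMIR_EMBEDDINGS_* defaults configured on mimir-server below:
#   docker compose exec llama-server curl -s http://localhost:8080/v1/embeddings \
#     -H 'Content-Type: application/json' \
#     -d '{"model":"mxbai-embed-large","input":"hello world"}'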
# Vision-Language server for multimodal embeddings (images + text)
# Used by mimir-server when image embeddings are enabled; can also stand in for llama-server above
llama-vl-server:
image: timothyswt/llama-cpp-server-arm64-qwen2.5-vl-2b:latest
container_name: llama_vl_server
# ports:
#   - "11436:8080" # Optional: expose on host port 11436 (internal 8080)
environment:
# Server Configuration
- LLAMA_ARG_HOST=0.0.0.0 # Bind all interfaces so other containers (mimir-server) can reach it
- LLAMA_ARG_PORT=8080
- LLAMA_ARG_CTX_SIZE=8192
- LLAMA_ARG_N_PARALLEL=4
# Embeddings-specific
- LLAMA_ARG_EMBEDDINGS=true
- LLAMA_ARG_POOLING=mean
# Performance
- LLAMA_ARG_THREADS=-1
- LLAMA_ARG_NO_MMAP=false
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- mcp_network
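# mimir-server (below) sends images to this service as OpenAI-style chat
# completions (MIMIR_EMBEDDINGS_VL_API_PATH). A request sketch, run from inside
# the compose network, with the image supplied as a base64 data URL; the message
# shape follows the OpenAI vision format and may vary slightly by server build:
#   curl -s http://llama-vl-server:8080/v1/chat/completions \
#     -H 'Content-Type: application/json' \
#     -d '{"model":"qwen2.5-vl","messages":[{"role":"user","content":[
#           {"type":"text","text":"Describe this image."},
#           {"type":"image_url","image_url":{"url":"data:image/png;base64,..."}}]}]}'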
mimir-server:
build:
context: .
dockerfile: Dockerfile
tags:
- timothyswt/mimir-server:1.0.0
image: timothyswt/mimir-server:1.0.0
container_name: mimir_server
restart: unless-stopped
environment:
# Database Configuration
- NEO4J_URI=bolt://neo4j:7687
- NEO4J_USER=neo4j
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-password} # Must match the default used in NEO4J_AUTH above
# Server Configuration
- NODE_ENV=production
- PORT=3000
# - NODE_TLS_REJECT_UNAUTHORIZED=${NODE_TLS_REJECT_UNAUTHORIZED:-1}
# Workspace Configuration
- WORKSPACE_ROOT=/workspace
- HOST_WORKSPACE_ROOT=${HOST_WORKSPACE_ROOT:-~/src} # Pass through from host
- HOST_HOME=${HOME} # Host's home directory for expanding ~ in HOST_WORKSPACE_ROOT
# LLM API Configuration
- MIMIR_DEFAULT_PROVIDER=${MIMIR_DEFAULT_PROVIDER:-copilot}
- MIMIR_LLM_API=${MIMIR_LLM_API:-http://copilot-api:4141}
- MIMIR_LLM_API_PATH=${MIMIR_LLM_API_PATH:-/v1/chat/completions}
- MIMIR_LLM_API_MODELS_PATH=${MIMIR_LLM_API_MODELS_PATH:-/v1/models}
- MIMIR_LLM_API_KEY=${MIMIR_LLM_API_KEY:-dummy-key}
# Provider and Model Configuration (100% dynamic - no config file needed)
- MIMIR_DEFAULT_MODEL=${MIMIR_DEFAULT_MODEL:-gpt-4.1}
# Per-Agent Model Configuration (optional overrides)
- MIMIR_PM_MODEL=${MIMIR_PM_MODEL:-}
- MIMIR_WORKER_MODEL=${MIMIR_WORKER_MODEL:-}
- MIMIR_QC_MODEL=${MIMIR_QC_MODEL:-}
# Context Window Configuration
- MIMIR_DEFAULT_CONTEXT_WINDOW=${MIMIR_DEFAULT_CONTEXT_WINDOW:-128000}
- MIMIR_EMBEDDINGS_PROVIDER=${MIMIR_EMBEDDINGS_PROVIDER:-openai}
- MIMIR_EMBEDDINGS_API=${MIMIR_EMBEDDINGS_API:-http://llama-server:8080}
- MIMIR_EMBEDDINGS_API_PATH=${MIMIR_EMBEDDINGS_API_PATH:-/v1/embeddings}
- MIMIR_EMBEDDINGS_API_MODELS_PATH=${MIMIR_EMBEDDINGS_API_MODELS_PATH:-/v1/models}
- MIMIR_EMBEDDINGS_API_KEY=${MIMIR_EMBEDDINGS_API_KEY:-dummy-key}
# Embeddings Configuration (llama.cpp uses OpenAI-compatible format)
- MIMIR_EMBEDDINGS_ENABLED=${MIMIR_EMBEDDINGS_ENABLED:-true}
- MIMIR_EMBEDDINGS_MODEL=${MIMIR_EMBEDDINGS_MODEL:-mxbai-embed-large}
- MIMIR_EMBEDDINGS_DIMENSIONS=${MIMIR_EMBEDDINGS_DIMENSIONS:-1024}
- MIMIR_EMBEDDINGS_CHUNK_SIZE=${MIMIR_EMBEDDINGS_CHUNK_SIZE:-768}
- MIMIR_EMBEDDINGS_CHUNK_OVERLAP=${MIMIR_EMBEDDINGS_CHUNK_OVERLAP:-10}
- MIMIR_EMBEDDINGS_DELAY_MS=${MIMIR_EMBEDDINGS_DELAY_MS:-100}
- MIMIR_EMBEDDINGS_MAX_RETRIES=${MIMIR_EMBEDDINGS_MAX_RETRIES:-3}
# Image/VL Embeddings Configuration (Vision-Language models)
# Image Embeddings Control (enabled here; set MIMIR_EMBEDDINGS_IMAGES=false to disable)
- MIMIR_EMBEDDINGS_IMAGES=${MIMIR_EMBEDDINGS_IMAGES:-true} # Default: enabled
- MIMIR_EMBEDDINGS_IMAGES_DESCRIBE_MODE=${MIMIR_EMBEDDINGS_IMAGES_DESCRIBE_MODE:-true} # Default: VL description mode
# Qwen2.5-VL Configuration (for llama.cpp server)
- MIMIR_EMBEDDINGS_VL_PROVIDER=${MIMIR_EMBEDDINGS_VL_PROVIDER:-llama.cpp}
- MIMIR_EMBEDDINGS_VL_API=${MIMIR_EMBEDDINGS_VL_API:-http://llama-vl-server:8080}
- MIMIR_EMBEDDINGS_VL_API_PATH=${MIMIR_EMBEDDINGS_VL_API_PATH:-/v1/chat/completions}
- MIMIR_EMBEDDINGS_VL_API_KEY=${MIMIR_EMBEDDINGS_VL_API_KEY:-dummy-key}
- MIMIR_EMBEDDINGS_VL_MODEL=${MIMIR_EMBEDDINGS_VL_MODEL:-qwen2.5-vl}
- MIMIR_EMBEDDINGS_VL_CONTEXT_SIZE=${MIMIR_EMBEDDINGS_VL_CONTEXT_SIZE:-131072} # 128K tokens (7b/72b)
- MIMIR_EMBEDDINGS_VL_MAX_TOKENS=${MIMIR_EMBEDDINGS_VL_MAX_TOKENS:-2048} # Max description length
- MIMIR_EMBEDDINGS_VL_TEMPERATURE=${MIMIR_EMBEDDINGS_VL_TEMPERATURE:-0.7}
- MIMIR_EMBEDDINGS_VL_DIMENSIONS=${MIMIR_EMBEDDINGS_VL_DIMENSIONS:-768} # Falls back to text model dims
- MIMIR_EMBEDDINGS_VL_TIMEOUT=${MIMIR_EMBEDDINGS_VL_TIMEOUT:-180000} # 3 minutes (VL processing is slow)
# Indexing Configuration
- MIMIR_INDEXING_THREADS=${MIMIR_INDEXING_THREADS:-1}
# Feature Flags
- MIMIR_FEATURE_PM_MODEL_SUGGESTIONS=${MIMIR_FEATURE_PM_MODEL_SUGGESTIONS:-true}
- MIMIR_AUTO_INDEX_DOCS=${MIMIR_AUTO_INDEX_DOCS:-true}
# Agent Execution Limits
- MIMIR_AGENT_RECURSION_LIMIT=${MIMIR_AGENT_RECURSION_LIMIT:-100}
# Security Configuration
- MIMIR_ENABLE_SECURITY=${MIMIR_ENABLE_SECURITY:-false}
- MIMIR_DEV_USER_ADMIN=${MIMIR_DEV_USER_ADMIN}
- MIMIR_DEV_USER_DEVELOPER=${MIMIR_DEV_USER_DEVELOPER}
- MIMIR_DEV_USER_ANALYST=${MIMIR_DEV_USER_ANALYST}
- MIMIR_DEV_USER_VIEWER=${MIMIR_DEV_USER_VIEWER}
- MIMIR_JWT_SECRET=${MIMIR_JWT_SECRET}
# OAuth Configuration (explicit endpoint URLs - provider-specific)
- MIMIR_AUTH_PROVIDER=${MIMIR_AUTH_PROVIDER}
- MIMIR_OAUTH_AUTHORIZATION_URL=${MIMIR_OAUTH_AUTHORIZATION_URL} # Full authorization endpoint URL
- MIMIR_OAUTH_TOKEN_URL=${MIMIR_OAUTH_TOKEN_URL} # Full token endpoint URL
- MIMIR_OAUTH_USERINFO_URL=${MIMIR_OAUTH_USERINFO_URL} # Full userinfo endpoint URL
- MIMIR_OAUTH_CLIENT_ID=${MIMIR_OAUTH_CLIENT_ID}
- MIMIR_OAUTH_CLIENT_SECRET=${MIMIR_OAUTH_CLIENT_SECRET}
- MIMIR_OAUTH_CALLBACK_URL=${MIMIR_OAUTH_CALLBACK_URL}
- MIMIR_OAUTH_ALLOW_HTTP=${MIMIR_OAUTH_ALLOW_HTTP}
# Advanced Configuration
- MIMIR_PARALLEL_EXECUTION=${MIMIR_PARALLEL_EXECUTION:-false}
- MIMIR_INSTALL_DIR=${MIMIR_INSTALL_DIR:-/app}
- MIMIR_AGENTS_DIR=${MIMIR_AGENTS_DIR:-/app/docs/agents}
# PCTX Integration (Code Mode for 90-98% token reduction)
- PCTX_URL=${PCTX_URL:-http://host.docker.internal:8080}
- PCTX_ENABLED=${PCTX_ENABLED:-false}
volumes:
- ./data:/app/data
- ./logs:/app/logs
- ${HOST_WORKSPACE_ROOT:-~/src}:${WORKSPACE_ROOT:-/workspace}
ports:
- "9042:3000"
healthcheck:
test: ["CMD", "node", "-e", "require('http').get('http://localhost:3000/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
depends_on:
neo4j:
condition: service_healthy
copilot-api:
condition: service_healthy
llama-server:
condition: service_healthy
llama-vl-server:
condition: service_healthy
networks:
- mcp_network
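# The /health route used by the healthcheck is also reachable from the host via
# the published port:
#   curl http://localhost:9042/health
# Note: PCTX_URL defaults to host.docker.internal, which resolves out of the box
# on Docker Desktop but on Linux typically needs the following added to this
# service (only relevant when PCTX_ENABLED=true):
#   extra_hosts:
#     - "host.docker.internal:host-gateway"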
networks:
mcp_network:
driver: bridge
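# Typical bring-up (a sketch): keep secrets in a .env file next to this compose
# file so the ${VAR:-default} interpolations above pick them up, then build and
# start the stack and wait for every service to report healthy.
#
#   # .env (placeholder values)
#   NEO4J_PASSWORD=change-me
#   MIMIR_JWT_SECRET=long-random-string
#   HOST_WORKSPACE_ROOT=~/src
#
#   docker compose up -d --build
#   docker compose ps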