# Hybrid Docker Compose for Apple Silicon with Native GPU Acceleration
#
# This setup runs:
#   - NornicDB: NATIVELY on macOS (for Metal GPU acceleration)
#   - Other services: in Docker containers
#
# Usage:
#   1. Start NornicDB natively (in a separate terminal):
#        cd nornicdb && ./nornicdb serve --http-port 7474 --bolt-port 7687 --data-dir ./data
#
#   2. Start the Docker services:
#        docker compose -f docker-compose.arm64.hybrid.yml up -d
#
# Prerequisites:
#   - Build NornicDB with GPU support:
#       export CGO_CFLAGS="-I/Users/c815719/src/vulkan-sdk/include"
#       export CGO_LDFLAGS="-L/Users/c815719/src/vulkan-sdk/lib -lvulkan"
#       CGO_ENABLED=1 go build -tags vulkan -o nornicdb ./cmd/nornicdb
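#
# Optional pre-flight check (a minimal sketch, assuming the ports from step 1
# above; adjust if you changed them): confirm the native NornicDB process is
# reachable on the host before bringing up the containers.
#
#   nc -z localhost 7474 && echo "HTTP port open"
#   nc -z localhost 7687 && echo "Bolt port open"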
services:
  # Copilot API proxy - OpenAI-compatible chat completions (ARM64)
  copilot-api:
    image: timothyswt/copilot-api-arm64:latest
    container_name: copilot_api_server
    ports:
      - "4141:4141"
    volumes:
      - ./copilot-data:/root/.local/share/copilot-api
    environment:
      - NODE_ENV=production
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --spider -q http://localhost:4141/ || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 15s
    networks:
      - mcp_network
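
  # Once healthy, the API can be smoke-tested from the host. A minimal sketch:
  # the /v1/models path and the dummy key mirror the mimir-server defaults
  # (MIMIR_LLM_API_MODELS_PATH, MIMIR_LLM_API_KEY) further down in this file.
  #
  #   curl -s http://localhost:4141/v1/models -H "Authorization: Bearer dummy-key"
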
  # llama.cpp server - OpenAI-compatible embeddings (ARM64 compatible)
  llama-server:
    image: timothyswt/llama-cpp-server-arm64-mxbai:latest
    container_name: llama_server
    ports:
      - "11434:8080"
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    networks:
      - mcp_network
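
  # From the host, embeddings are exposed on the mapped port 11434. A minimal
  # request sketch; the path and model name mirror the mimir-server defaults
  # below (MIMIR_EMBEDDINGS_API_PATH, MIMIR_EMBEDDINGS_MODEL):
  #
  #   curl -s http://localhost:11434/v1/embeddings \
  #     -H "Content-Type: application/json" \
  #     -d '{"model": "mxbai-embed-large", "input": "hello world"}'
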
  # Mimir MCP server - built locally; talks to the native NornicDB over Bolt
  mimir-server:
    build:
      context: .
      dockerfile: Dockerfile
      tags:
        - mimir-server:${VERSION:-1.0.0}
        - mimir-server:latest
    image: mimir-server:${VERSION:-1.0.0}
    container_name: mimir_server
    restart: unless-stopped
    environment:
      # Database Configuration - connect to NornicDB running on the HOST (native GPU)
      - NEO4J_URI=bolt://host.docker.internal:7687
      - NEO4J_USER=neo4j
      - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
      # Server Configuration
      - NODE_ENV=production
      - PORT=3000
      # Workspace Configuration
      - WORKSPACE_ROOT=/workspace
      - HOST_WORKSPACE_ROOT=${HOST_WORKSPACE_ROOT:-~/src}
      - HOST_HOME=${HOME}
      # LLM API Configuration
      - MIMIR_DEFAULT_PROVIDER=${MIMIR_DEFAULT_PROVIDER:-copilot}
      - MIMIR_LLM_API=${MIMIR_LLM_API:-http://copilot-api:4141}
      - MIMIR_LLM_API_PATH=${MIMIR_LLM_API_PATH:-/v1/chat/completions}
      - MIMIR_LLM_API_MODELS_PATH=${MIMIR_LLM_API_MODELS_PATH:-/v1/models}
      - MIMIR_LLM_API_KEY=${MIMIR_LLM_API_KEY:-dummy-key}
      # PCTX Integration
      - PCTX_URL=${PCTX_URL:-http://host.docker.internal:8080}
      - PCTX_ENABLED=${PCTX_ENABLED:-false}
      # Model Configuration
      - MIMIR_DEFAULT_MODEL=${MIMIR_DEFAULT_MODEL:-gpt-4.1}
      - MIMIR_DEFAULT_CONTEXT_WINDOW=${MIMIR_DEFAULT_CONTEXT_WINDOW:-128000}
      # Embeddings API - connect to llama-server in Docker
      - MIMIR_EMBEDDINGS_PROVIDER=${MIMIR_EMBEDDINGS_PROVIDER:-openai}
      - MIMIR_EMBEDDINGS_API=${MIMIR_EMBEDDINGS_API:-http://llama-server:8080}
      - MIMIR_EMBEDDINGS_API_PATH=${MIMIR_EMBEDDINGS_API_PATH:-/v1/embeddings}
      - MIMIR_EMBEDDINGS_API_MODELS_PATH=${MIMIR_EMBEDDINGS_API_MODELS_PATH:-/v1/models}
      - MIMIR_EMBEDDINGS_API_KEY=${MIMIR_EMBEDDINGS_API_KEY:-dummy-key}
      - MIMIR_EMBEDDINGS_ENABLED=${MIMIR_EMBEDDINGS_ENABLED:-true}
      - MIMIR_EMBEDDINGS_MODEL=${MIMIR_EMBEDDINGS_MODEL:-mxbai-embed-large}
      - MIMIR_EMBEDDINGS_DIMENSIONS=${MIMIR_EMBEDDINGS_DIMENSIONS:-1024}
      # Feature Flags
      - MIMIR_FEATURE_PM_MODEL_SUGGESTIONS=${MIMIR_FEATURE_PM_MODEL_SUGGESTIONS:-true}
      - MIMIR_AUTO_INDEX_DOCS=${MIMIR_AUTO_INDEX_DOCS:-true}
      # Security Configuration
      - MIMIR_ENABLE_SECURITY=${MIMIR_ENABLE_SECURITY:-false}
      # Advanced Configuration
      - MIMIR_PARALLEL_EXECUTION=${MIMIR_PARALLEL_EXECUTION:-false}
      - MIMIR_INSTALL_DIR=${MIMIR_INSTALL_DIR:-/app}
      - MIMIR_AGENTS_DIR=${MIMIR_AGENTS_DIR:-/app/docs/agents}
    volumes:
      - ./data:/app/data
      - ./logs:/app/logs
      - ${HOST_WORKSPACE_ROOT:-~/src}:${WORKSPACE_ROOT:-/workspace}
    ports:
      - "9042:3000"
    healthcheck:
      test: ["CMD", "node", "-e", "require('http').get('http://localhost:3000/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    depends_on:
      copilot-api:
        condition: service_healthy
      llama-server:
        condition: service_healthy
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - mcp_network
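
  # To verify the wiring once the stack is up (the /health path and the 9042
  # host port come from the service definition above):
  #
  #   curl -sf http://localhost:9042/health && echo "mimir-server healthy"
  #
  # And, as a sketch, to confirm the container can reach the native NornicDB on
  # the host over Bolt (node is available in the image, per the healthcheck):
  #
  #   docker compose -f docker-compose.arm64.hybrid.yml exec mimir-server \
  #     node -e "require('net').connect(7687, 'host.docker.internal').on('connect', () => { console.log('bolt reachable'); process.exit(0); }).on('error', () => process.exit(1))"
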
networks:
  mcp_network:
    driver: bridge
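
# Overall stack status; the health column reflects the healthchecks defined above:
#
#   docker compose -f docker-compose.arm64.hybrid.yml ps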