
M.I.M.I.R - Multi-agent Intelligent Memory & Insight Repository

by orneryd
docker-compose.extend.yml (3.6 kB)

This Compose override adds a development container (mimir-dev) on top of the base Mimir stack, wiring it to Neo4j, a Copilot-compatible LLM API, and a llama.cpp embeddings server:
services:
  # Development container with additional tools
  mimir-dev:
    build:
      context: .
      dockerfile: .devcontainer/Dockerfile
    container_name: mimir_dev
    restart: unless-stopped
    command: sleep infinity
    user: "1000:1000" # Run as node user (uid 1000)
    environment:
      # Database Configuration
      - NEO4J_URI=bolt://neo4j:7687
      - NEO4J_USER=neo4j
      - NEO4J_PASSWORD=${NEO4J_PASSWORD:-password}
      # Server Configuration
      - NODE_ENV=development
      - PORT=3000
      # Workspace Configuration
      - WORKSPACE_ROOT=/workspace
      - HOST_WORKSPACE_ROOT=/workspace
      # LLM API Configuration (Copilot - OpenAI-compatible)
      - MIMIR_DEFAULT_PROVIDER=${MIMIR_DEFAULT_PROVIDER:-copilot}
      - MIMIR_LLM_API=${MIMIR_LLM_API:-http://copilot-api:4141}
      - MIMIR_LLM_API_PATH=${MIMIR_LLM_API_PATH:-/v1/chat/completions}
      - MIMIR_LLM_API_MODELS_PATH=${MIMIR_LLM_API_MODELS_PATH:-/v1/models}
      - MIMIR_LLM_API_KEY=${MIMIR_LLM_API_KEY:-dummy-key}
      # Provider and Model Configuration
      - MIMIR_DEFAULT_MODEL=${MIMIR_DEFAULT_MODEL:-gpt-4.1}
      - MIMIR_PM_MODEL=${MIMIR_PM_MODEL:-gpt-4.1}
      - MIMIR_WORKER_MODEL=${MIMIR_WORKER_MODEL:-gpt-4.1}
      - MIMIR_QC_MODEL=${MIMIR_QC_MODEL:-gpt-4.1}
      # Context Window Configuration
      - MIMIR_DEFAULT_CONTEXT_WINDOW=${MIMIR_DEFAULT_CONTEXT_WINDOW:-128000}
      # Embeddings API Configuration (llama.cpp server - OpenAI-compatible)
      - MIMIR_EMBEDDINGS_PROVIDER=${MIMIR_EMBEDDINGS_PROVIDER:-openai}
      - MIMIR_EMBEDDINGS_API=${MIMIR_EMBEDDINGS_API:-http://llama-server:8080}
      - MIMIR_EMBEDDINGS_API_PATH=${MIMIR_EMBEDDINGS_API_PATH:-/v1/embeddings}
      - MIMIR_EMBEDDINGS_API_MODELS_PATH=${MIMIR_EMBEDDINGS_API_MODELS_PATH:-/v1/models}
      - MIMIR_EMBEDDINGS_API_KEY=${MIMIR_EMBEDDINGS_API_KEY:-dummy-key}
      # Embeddings Configuration
      - MIMIR_EMBEDDINGS_ENABLED=${MIMIR_EMBEDDINGS_ENABLED:-true}
      - MIMIR_EMBEDDINGS_MODEL=${MIMIR_EMBEDDINGS_MODEL:-mxbai-embed-large}
      - MIMIR_EMBEDDINGS_DIMENSIONS=${MIMIR_EMBEDDINGS_DIMENSIONS:-1024}
      - MIMIR_EMBEDDINGS_CHUNK_SIZE=${MIMIR_EMBEDDINGS_CHUNK_SIZE:-768}
      - MIMIR_EMBEDDINGS_CHUNK_OVERLAP=${MIMIR_EMBEDDINGS_CHUNK_OVERLAP:-10}
      - MIMIR_EMBEDDINGS_DELAY_MS=${MIMIR_EMBEDDINGS_DELAY_MS:-100}
      - MIMIR_EMBEDDINGS_MAX_RETRIES=${MIMIR_EMBEDDINGS_MAX_RETRIES:-3}
      # Indexing Configuration
      - MIMIR_INDEXING_THREADS=${MIMIR_INDEXING_THREADS:-1}
      # Feature Flags
      - MIMIR_FEATURE_PM_MODEL_SUGGESTIONS=${MIMIR_FEATURE_PM_MODEL_SUGGESTIONS:-true}
      - MIMIR_AUTO_INDEX_DOCS=${MIMIR_AUTO_INDEX_DOCS:-true}
      # Advanced Configuration
      - MIMIR_PARALLEL_EXECUTION=${MIMIR_PARALLEL_EXECUTION:-false}
      - MIMIR_INSTALL_DIR=${MIMIR_INSTALL_DIR:-/app}
      - MIMIR_AGENTS_DIR=${MIMIR_AGENTS_DIR:-/app/docs/agents}
    volumes:
      # Mount project source (read-write for development)
      - .:/app:cached
      # Share data and logs with host
      - ./data:/app/data
      - ./logs:/app/logs
      # Workspace mount for file indexing
      - ${HOST_WORKSPACE_ROOT:-~/src}:/workspace:cached
      # Preserve node_modules in container
      - /app/node_modules
      - /app/frontend/node_modules
    ports:
      - "9042:3000" # HTTP server
      - "5173:5173" # Vite dev server
    depends_on:
      neo4j:
        condition: service_healthy
      copilot-api:
        condition: service_healthy
      llama-server:
        condition: service_healthy
    networks:
      - mcp_network
    # Development container doesn't need health check
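To bring the dev environment up, merge this override with the base compose file. A minimal sketch, assuming the base docker-compose.yml defines the neo4j, copilot-api, and llama-server services named in depends_on, and that the dev image ships bash:

    # Start the dev container and its healthy dependencies
    # (later -f files override earlier ones)
    docker compose -f docker-compose.yml -f docker-compose.extend.yml up -d mimir-dev

    # Attach a shell to the idling container
    # (command: sleep infinity keeps it alive)
    docker exec -it mimir_dev bash

Each ${VAR:-default} reference above falls back to the shown default when the variable is unset, so overrides can live in a .env file next to the compose files (e.g. NEO4J_PASSWORD=s3cret) without editing the YAML.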
