# syntax=docker/dockerfile:1

# Slim Python base keeps the image small; bump the tag (rather than
# `apt-get upgrade`) to pick up OS security fixes.
FROM python:3.12-slim

# All subsequent relative paths resolve here; WORKDIR creates it if missing.
WORKDIR /app

# Build toolchain for compiling native extensions pulled in by the ML
# dependencies. build-essential already depends on gcc and g++, so they are
# not listed separately. --no-install-recommends avoids optional packages,
# and the apt list cleanup happens in the same layer so it never bloats
# the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy only the dependency manifests first so the expensive install layer
# below stays cached while application source changes.
COPY requirements.txt .
COPY pyproject.toml .

# --no-cache-dir keeps pip's download cache out of the image layer.
RUN pip install --no-cache-dir -r requirements.txt

# Copy source code after deps so source edits don't invalidate the
# dependency layer.
COPY src/ ./src/

# Install the package itself; --no-cache-dir again for consistency and to
# avoid layer bloat.
RUN pip install --no-cache-dir .

# Create index directories - permissions will be handled by running with host user
RUN mkdir -p /app/index /app/vector-index /app/.cache

# PYTHONUNBUFFERED: stream logs immediately (no stdout buffering).
# PYTHONDONTWRITEBYTECODE: skip .pyc files inside the container.
# HF_HOME / SENTENCE_TRANSFORMERS_HOME: point HuggingFace model caches at a
# location that stays writable when the container runs as the host user.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    HF_HOME=/app/.cache \
    SENTENCE_TRANSFORMERS_HOME=/app/.cache/sentence_transformers

# NOTE(review): no USER directive, so the image defaults to root. The comment
# above suggests it is intended to run with the host user's UID via
# `docker run --user` — confirm, or add a dedicated non-root user here.

# Exec-form CMD so the server runs as PID 1 and receives SIGTERM from
# `docker stop`.
CMD ["obsidian-mcp-server"]
# ---------------------------------------------------------------------------
# MCP directory API
# We provide all the information about MCP servers via our MCP API:
#   curl -X GET 'https://glama.ai/api/mcp/v1/servers/Bachmann1234/obsidianMCP'
# If you have feedback or need assistance with the MCP directory API, please
# join our Discord server.
# ---------------------------------------------------------------------------