# Environment variables for CanvasXpress MCP Server
# ============================================================
# LLM PROVIDER SELECTION
# ============================================================
# Choose your LLM provider: "openai" or "gemini"
LLM_PROVIDER=openai
# ============================================================
# AZURE OPENAI CONFIGURATION (when LLM_PROVIDER=openai)
# ============================================================
# Azure OpenAI API Key (from genai.web.bms.com)
AZURE_OPENAI_KEY=your_azure_openai_key_here
# Azure OpenAI API Version
AZURE_OPENAI_API_VERSION=2024-02-01
# Azure OpenAI Model (deployment name)
# Options: gpt-4o-mini-global, gpt-4o-global, gpt-4-turbo-global
LLM_MODEL=gpt-4o-mini-global
# BMS Environment (nonprod or prod)
LLM_ENVIRONMENT=nonprod
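#
# Example: switching to the production BMS environment (commented out;
# a sketch, since deployment names may differ between environments):
#   LLM_MODEL=gpt-4o-global
#   LLM_ENVIRONMENT=prod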
# ============================================================
# GOOGLE GEMINI CONFIGURATION (when LLM_PROVIDER=gemini)
# ============================================================
# Google AI API Key (from https://aistudio.google.com/apikey)
GOOGLE_API_KEY=your_google_api_key_here
# Gemini Model
# Options: gemini-2.0-flash-exp, gemini-1.5-flash, gemini-1.5-pro
GEMINI_MODEL=gemini-2.0-flash-exp
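#
# Example: a minimal Gemini setup, assuming the Azure variables above can
# stay unset when LLM_PROVIDER=gemini (commented out; uncomment to apply):
#   LLM_PROVIDER=gemini
#   GOOGLE_API_KEY=your_google_api_key_here
#   GEMINI_MODEL=gemini-1.5-flash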
# ============================================================
# EMBEDDING PROVIDER SELECTION
# ============================================================
# Choose your embedding provider: "local", "onnx", "openai", or "gemini"
# - local: Uses the BGE-M3 model locally (requires PyTorch ~2GB; 93% accuracy in testing)
# - onnx: Uses lightweight ONNX models locally (no PyTorch, ~100MB, good accuracy)
# - openai: Uses the Azure OpenAI embedding API (text-embedding-3-small by default)
# - gemini: Uses the Gemini text-embedding-004 API (faster startup, no local model)
EMBEDDING_PROVIDER=local
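#
# Which variables apply per provider (a sketch; verify against the server
# code): local needs no extra variables; onnx reads ONNX_EMBEDDING_MODEL;
# openai reads OPENAI_EMBEDDING_MODEL (and likely AZURE_OPENAI_KEY above);
# gemini reads GEMINI_EMBEDDING_MODEL (and likely GOOGLE_API_KEY above).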
# ONNX embedding model (when EMBEDDING_PROVIDER=onnx)
# Lightweight local models (~1GB RAM vs ~3-4GB for BGE-M3 local)
#
# Recommended for light servers:
# all-MiniLM-L6-v2 (384d, ~22MB, fastest) <- DEFAULT
# all-MiniLM-L12-v2 (384d, ~33MB, better quality)
# multi-qa-MiniLM-L6-cos-v1 (384d, ~22MB, optimized for Q&A)
#
# Higher quality (more RAM):
# all-mpnet-base-v2 (768d, ~420MB, best quality)
# BAAI/bge-small-en-v1.5 (384d, ~33MB, BGE family)
# BAAI/bge-base-en-v1.5 (768d, ~110MB, BGE family)
#
# Long context (8192 tokens):
# nomic-ai/nomic-embed-text-v1.5 (768d, ~100MB, beats OpenAI ada-002)
#
# See: https://www.sbert.net/docs/pretrained_models.html
ONNX_EMBEDDING_MODEL=all-MiniLM-L6-v2
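#
# Example: trading RAM for quality on the ONNX path (commented out;
# uncomment both lines to apply):
#   EMBEDDING_PROVIDER=onnx
#   ONNX_EMBEDDING_MODEL=all-mpnet-base-v2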
# OpenAI embedding model (when EMBEDDING_PROVIDER=openai)
# Options: text-embedding-3-small, text-embedding-3-large, text-embedding-ada-002
OPENAI_EMBEDDING_MODEL=text-embedding-3-small
# Gemini embedding model (when EMBEDDING_PROVIDER=gemini)
GEMINI_EMBEDDING_MODEL=text-embedding-004
# ============================================================
# MCP SERVER CONFIGURATION
# ============================================================
# Transport mode: http (network, default) or stdio (local, for Claude Desktop)
MCP_TRANSPORT=http
# HTTP mode settings (only used when MCP_TRANSPORT=http)
MCP_HOST=0.0.0.0
MCP_PORT=8000
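#
# Example: running locally for Claude Desktop over stdio, where MCP_HOST
# and MCP_PORT are ignored (commented out; uncomment to apply):
#   MCP_TRANSPORT=stdio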
# ============================================================
# PROMPT TEMPLATE CONFIGURATION
# ============================================================
# Prompt template version: v2 (default, includes rules) or v1 (original, no rules)
# v2 uses prompt_template_v2.md with canvasxpress_rules.md for better accuracy
# v1 uses the original prompt_template.md
PROMPT_VERSION=v2
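#
# Example: reverting to the original prompt template (commented out):
#   PROMPT_VERSION=v1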
# ============================================================
# ALTERNATIVE WORDING GENERATION (scripts/generate_alt_wordings.py)
# ============================================================
# Number of alternative wordings to generate per example
ALT_WORDING_COUNT=3
# Temperature for alternative wording generation (higher = more varied;
# the low default keeps rewordings close to the originals)
ALT_WORDING_TEMP=0.1