We provide all the information about MCP servers via our MCP API.
curl -X GET 'https://glama.ai/api/mcp/v1/servers/chweyun/mcp-link-scan'
If you have feedback or need assistance with the MCP directory API, please join our Discord server.
init-ollama.sh•581 B
#!/bin/sh
# Initialize Ollama: start the server in the background, poll its HTTP API
# until it is ready, pull the requested model, then keep the server process
# in the foreground so the container stays alive.
#
# Environment:
#   OLLAMA_MODEL - model to pull (default: llama3:latest)
set -eu

echo "Starting Ollama server..."
ollama serve &
server_pid=$!

# Poll the API instead of a fixed initial sleep: the loop below IS the
# readiness check, so a long unconditional delay only slows startup.
echo "Waiting for Ollama to be ready..."
until curl -s http://localhost:11434/api/tags > /dev/null 2>&1; do
  echo "Waiting for Ollama API..."
  sleep 2
done

MODEL_TO_PULL=${OLLAMA_MODEL:-llama3:latest}
echo "Ollama is ready. Pulling model: $MODEL_TO_PULL..."
if ! ollama pull "$MODEL_TO_PULL"; then
  # Fail fast instead of blocking on 'wait' with no model available.
  echo "Failed to pull model: $MODEL_TO_PULL" >&2
  kill "$server_pid" 2>/dev/null || true
  exit 1
fi
echo "$MODEL_TO_PULL model pulled successfully. Keeping Ollama server running..."

# Keep the container running; waiting on the captured PID propagates the
# server's exit status as the script's (and container's) exit code.
wait "$server_pid"