# We provide all the information about MCP servers via our MCP API:
#   curl -X GET 'https://glama.ai/api/mcp/v1/servers/lhmpaiPublic/McpLLMServer'
# If you have feedback or need assistance with the MCP directory API, please join our Discord server.
import httpx
# Endpoint for Ollama's non-streaming text-generation API; the "ollama"
# hostname suggests a Docker-compose service name — TODO confirm.
OLLAMA_URL: str = "http://ollama:11434/api/generate"
# Default model name sent with every generation request.
MODEL: str = "llama3"
async def generate(prompt: str, *, model: str = MODEL, timeout: float = 60) -> str:
    """Generate a completion for *prompt* via the Ollama HTTP API.

    Args:
        prompt: Text prompt forwarded verbatim to the model.
        model: Ollama model name; defaults to the module-level ``MODEL``.
        timeout: Overall HTTP timeout in seconds for the request.

    Returns:
        The generated text from the ``"response"`` field of the reply JSON.

    Raises:
        httpx.HTTPStatusError: If the server answers with a 4xx/5xx status.
        httpx.TimeoutException: If the request exceeds *timeout*.
        KeyError: If the reply JSON has no ``"response"`` field.
    """
    async with httpx.AsyncClient(timeout=timeout) as client:
        response = await client.post(
            OLLAMA_URL,
            json={
                "model": model,
                "prompt": prompt,
                # stream=False asks Ollama for one complete JSON object
                # instead of a stream of partial chunks.
                "stream": False,
            },
        )
        response.raise_for_status()
        return response.json()["response"]