Skip to main content
Glama
cbcoutinho

Nextcloud MCP Server

by cbcoutinho
rag-evaluation.yml (3.76 kB)
# Manually-triggered workflow that spins up a Nextcloud + MCP stack via
# docker compose and runs the RAG (retrieval-augmented generation)
# integration tests against GitHub Models as the OpenAI-compatible backend.
name: RAG Evaluation

on:
  workflow_dispatch:
    inputs:
      manual_path:
        description: 'Path to Nextcloud User Manual PDF in Nextcloud'
        required: false
        default: 'Nextcloud Manual.pdf'
      embedding_model:
        description: 'OpenAI embedding model'
        required: false
        default: 'openai/text-embedding-3-small'
      generation_model:
        description: 'OpenAI generation model'
        required: false
        default: 'openai/gpt-4o-mini'

jobs:
  rag-evaluation:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      # NOTE(fix): specifying any permission resets all unspecified scopes
      # to `none`, which would strip the `contents: read` that
      # actions/checkout needs to clone the repository.
      contents: read
      # Required to call the GitHub Models inference API with GITHUB_TOKEN.
      models: read
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Run docker compose with vector sync
        uses: hoverkraft-tech/compose-action@248470ecc5ed40d8ed3d4480d8260d77179ef579 # v2.4.2
        with:
          compose-file: |
            ./docker-compose.yml
            ./docker-compose.ci.yml
          up-flags: "--build"
        env:
          # Environment variables passed to docker-compose.ci.yml.
          # GITHUB_TOKEN doubles as the API key for the GitHub Models endpoint.
          OPENAI_API_KEY: ${{ secrets.GITHUB_TOKEN }}
          OPENAI_BASE_URL: "https://models.github.ai/inference"
          OPENAI_EMBEDDING_MODEL: ${{ inputs.embedding_model }}
          OPENAI_GENERATION_MODEL: ${{ inputs.generation_model }}
          VECTOR_SYNC_SCAN_INTERVAL: "5"

      - name: Install the latest version of uv
        uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6

      - name: Wait for Nextcloud to be ready
        # An unauthenticated serverinfo request returns 401 once Nextcloud
        # is up; poll for that status (up to 60 x 5s = 5 minutes).
        run: |
          echo "Waiting for Nextcloud..."
          max_attempts=60
          attempt=0
          until curl -o /dev/null -s -w "%{http_code}\n" http://localhost:8080/ocs/v2.php/apps/serverinfo/api/v1/info | grep -q "401"; do
            attempt=$((attempt + 1))
            if [ $attempt -ge $max_attempts ]; then
              echo "Service did not become ready in time."
              exit 1
            fi
            echo "Attempt $attempt/$max_attempts: Service not ready, sleeping for 5 seconds..."
            sleep 5
          done
          echo "Nextcloud is ready."

      - name: Wait for MCP server to be ready
        # Poll the MCP liveness endpoint until it returns 200
        # (up to 30 x 2s = 1 minute).
        run: |
          echo "Waiting for MCP server..."
          max_attempts=30
          attempt=0
          until curl -o /dev/null -s -w "%{http_code}\n" http://localhost:8000/health/live | grep -q "200"; do
            attempt=$((attempt + 1))
            if [ $attempt -ge $max_attempts ]; then
              echo "MCP server did not become ready in time."
              exit 1
            fi
            echo "Attempt $attempt/$max_attempts: MCP not ready, sleeping for 2 seconds..."
            sleep 2
          done
          echo "MCP server is ready."

      - name: Run RAG evaluation tests
        env:
          NEXTCLOUD_HOST: "http://localhost:8080"
          NEXTCLOUD_USERNAME: "admin"
          NEXTCLOUD_PASSWORD: "admin"
          RAG_MANUAL_PATH: ${{ inputs.manual_path }}
          OPENAI_API_KEY: ${{ secrets.GITHUB_TOKEN }}
          OPENAI_BASE_URL: "https://models.github.ai/inference"
          OPENAI_EMBEDDING_MODEL: ${{ inputs.embedding_model }}
          OPENAI_GENERATION_MODEL: ${{ inputs.generation_model }}
        run: |
          uv run pytest tests/integration/test_rag.py -v --log-cli-level=INFO --provider openai

      - name: Capture MCP container logs
        # Run even on failure so logs are available for debugging.
        if: always()
        run: |
          echo "=== MCP Container Logs ==="
          docker compose logs mcp --tail=500

      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5
        with:
          name: rag-evaluation-results
          path: |
            pytest-results.xml
          retention-days: 30

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/cbcoutinho/nextcloud-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.