# Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# ══════════════════════════════════════════════════════════════════════
# Reusable composite action: Install Ollama and cache models
#
# Installs the Ollama server, starts it in the background, pulls the
# requested models, and caches them between CI runs using
# actions/cache. The cache key includes the model list so pulling
# different models in different workflows produces separate caches.
#
# Usage:
#
# - uses: ./.github/actions/setup-ollama
# with:
# models: "gemma3:4b"
#
# The Ollama model blobs live under ~/.ollama/models. The cache
# restores this directory before pulling, so only new or updated
# models trigger a download.
# ══════════════════════════════════════════════════════════════════════
name: Setup Ollama
description: Install Ollama, pull models, and cache them between runs.
inputs:
  models:
    # This value is also interpolated verbatim into the actions/cache
    # key, so each distinct model list yields its own cache entry.
    description: >-
      Space-separated list of Ollama model tags to pull
      (e.g. "gemma3:4b").
    required: true
  ollama-version:
    # NOTE(review): confirm the install step actually honors this
    # input — the official install script pins a release only when
    # the OLLAMA_VERSION environment variable is set.
    description: >-
      Ollama version to install. "latest" fetches the newest release.
    required: false
    default: latest
runs:
using: composite
steps:
    # ── 1. Restore cached models ────────────────────────────────────
    - name: Restore Ollama model cache
      id: cache-ollama
      uses: actions/cache@v4
      with:
        path: ~/.ollama/models
        # The raw model list is embedded verbatim, so different model
        # sets produce separate caches, and runner.os keeps caches from
        # different runner platforms from mixing.
        # NOTE(review): cache keys are capped at 512 characters and must
        # not contain commas — verify callers pass short, comma-free
        # model lists, or hash the list into the key instead.
        key: ollama-models-${{ runner.os }}-${{ inputs.models }}
# ── 2. Install Ollama ───────────────────────────────────────────
- name: Install Ollama
shell: bash
run: |
curl -fsSL https://ollama.com/install.sh | sh
echo "Ollama version: $(ollama --version)"
# ── 3. Start Ollama server ──────────────────────────────────────
- name: Start Ollama server
shell: bash
run: |
ollama serve &
# Wait for the server to be ready (up to 30 seconds).
for i in $(seq 1 30); do
if curl -sf http://localhost:11434/api/tags >/dev/null 2>&1; then
echo "Ollama server is ready"
break
fi
sleep 1
done
# Final check to ensure the server is ready before proceeding.
if ! curl -sf http://localhost:11434/api/tags >/dev/null 2>&1; then
echo "::error::Ollama server failed to start after 30 seconds."
exit 1
fi
# ── 4. Pull models (skips if already cached) ────────────────────
- name: Pull Ollama models
shell: bash
env:
OLLAMA_MODELS: ${{ inputs.models }}
run: |
for model in $OLLAMA_MODELS; do
echo "::group::Pulling $model"
ollama pull "$model"
echo "::endgroup::"
done
echo "Available models:"
ollama list