# Claude-powered PR review workflow. Triggered either manually
# (workflow_dispatch with an explicit PR number + label) or automatically when
# a PR carrying a claude:* label is reopened.
name: Claude PR Review (labeled)

on:
  workflow_dispatch:
    inputs:
      pr:
        description: 'PR number'
        required: true
      label:
        description: 'claude:review or claude:ultra'
        required: true
      source:
        description: 'Dispatch source for debugging'
        required: false
        default: 'gate'
  pull_request:
    types: [reopened] # optional: manual nudge path for reopened PRs

# One active run per PR: a newer dispatch/reopen for the same PR cancels an
# in-flight review.
concurrency:
  group: claude-pr-labeled-${{ github.event.inputs.pr || github.event.pull_request.number }}
  cancel-in-progress: true

# Least-privilege token: write access is only needed for posting PR/issue
# comments and removing labels.
permissions:
  contents: read
  pull-requests: write
  issues: write
  actions: read
  checks: read
jobs:
  review:
    # Run for workflow_dispatch or PR reopened (with Claude label)
    if: |
      (github.event_name == 'workflow_dispatch') ||
      (github.event_name == 'pull_request' && github.event.action == 'reopened' &&
       (contains(github.event.pull_request.labels.*.name, 'claude:review') ||
        contains(github.event.pull_request.labels.*.name, 'claude:ultra')))
    runs-on: ubuntu-latest
    # Job-level concurrency in addition to the workflow-level group above:
    # serializes reviews per PR across trigger shapes.
    concurrency:
      group: pr-${{ github.event.inputs.pr || github.event.pull_request.number }}-claude-review
      cancel-in-progress: true
    steps:
- name: Resolve inputs
id: resolve
run: |
echo "pr=${{ github.event.inputs.pr || github.event.pull_request.number }}" >> $GITHUB_OUTPUT
echo "label=${{ github.event.inputs.label || 'claude:review' }}" >> $GITHUB_OUTPUT
echo "source=${{ github.event.inputs.source || 'reopened' }}" >> $GITHUB_OUTPUT
echo "🔍 Dispatch source: ${{ github.event.inputs.source || 'reopened' }}"
# Fast label check to short-circuit unrelated label churn
- name: Has claude label?
id: haslabel
uses: actions/github-script@v7
with:
script: |
const prNumber = '${{ steps.resolve.outputs.pr }}';
const labels = await github.paginate(github.rest.issues.listLabelsOnIssue, {
owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, per_page: 100
});
const names = new Set(labels.map(l => l.name));
const hasReview = names.has('claude:review');
const hasUltra = names.has('claude:ultra');
core.setOutput('has', String(hasReview || hasUltra));
core.setOutput('has_review', String(hasReview));
core.setOutput('has_ultra', String(hasUltra));
      # NOTE(review): `run: exit 0` ends this *step* successfully; it does NOT
      # stop the job. The actual gating happens in the per-step `if:`
      # conditions below (and in the job-level `if:`), so this step is
      # effectively a log marker only.
      - name: Skip if not a Claude label
        if: steps.haslabel.outputs.has != 'true' && github.event_name == 'pull_request'
        run: exit 0
      # Optional (audit egress): set to "audit" first; switch to "block" once stable
      # NOTE(review): harden-runner is most effective as the *first* step of a
      # job — the steps above run before egress monitoring starts. Consider
      # moving it to the top of the step list.
      - name: Harden Runner (egress audit)
        uses: step-security/harden-runner@v2
        with:
          egress-policy: audit
      # Fetch the PR and derive everything later steps need: fork status,
      # size metrics (drive turn limits), and base/head SHAs (drive checkout
      # and scope computation).
      - name: Get PR info & determine environment
        id: decide
        if: steps.haslabel.outputs.has == 'true' || (github.event_name == 'pull_request' && github.event.action == 'reopened') || github.event_name == 'workflow_dispatch'
        uses: actions/github-script@v7
        with:
          script: |
            // Get PR number from resolved inputs
            const prNumber = '${{ steps.resolve.outputs.pr }}';
            if (!prNumber) {
              core.setFailed('No PR number resolved'); return;
            }
            // Always fetch the PR fresh
            const { data: pr } = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: prNumber
            });
            // "internal" = the PR head branch lives in this repository (not a
            // fork). Fork PRs are handled separately because repo secrets are
            // not available to them.
            const internal = pr.head.repo.full_name === `${context.repo.owner}/${context.repo.repo}`;
            const changed = pr.changed_files || 0;
            const additions = pr.additions || 0;
            const deletions = pr.deletions || 0;
            const totalLines = additions + deletions;
            // Optional debug (shows up in logs)
            core.info(`PR #${pr.number}: internal=${internal}, changed=${changed} files, lines=${totalLines}`);
            core.setOutput('internal', String(internal));
            core.setOutput('pr', String(pr.number));
            core.setOutput('sha', pr.head.sha);
            core.setOutput('changed', String(changed));
            core.setOutput('lines', String(totalLines));
            core.setOutput('base_sha', pr.base.sha);
            core.setOutput('base_ref', pr.base.ref);
- name: Calculate turn limits based on PR size
id: turns
run: |
lines=${{ steps.decide.outputs.lines || 0 }}
files=${{ steps.decide.outputs.changed || 0 }}
# Scale turns with diff size (bigger diff → more turns)
if [ $lines -le 400 ]; then sonnet_cap=22; opus_cap=26
elif [ $lines -le 1500 ]; then sonnet_cap=32; opus_cap=38
elif [ $lines -le 4000 ]; then sonnet_cap=44; opus_cap=52
else sonnet_cap=56; opus_cap=64
fi
# Calculate cutoff turns (when to stop using tools)
sonnet_cutoff=$((sonnet_cap - 10))
opus_cutoff=$((opus_cap - 12))
echo "sonnet_turns=$sonnet_cap" >> $GITHUB_OUTPUT
echo "opus_turns=$opus_cap" >> $GITHUB_OUTPUT
echo "sonnet_cutoff=$sonnet_cutoff" >> $GITHUB_OUTPUT
echo "opus_cutoff=$opus_cutoff" >> $GITHUB_OUTPUT
# Summary for maintainers in both logs and step summary
summary="🎯 PR size: $lines lines, $files files → Sonnet: $sonnet_cap turns, Opus: $opus_cap turns"
echo "$summary"
echo "$summary" >> $GITHUB_STEP_SUMMARY
      # Fork PRs won't have repo secrets -> comment guidance and skip
      - name: Handle forks (no secrets in PR context)
        if: steps.decide.outputs.internal != 'true'
        uses: actions/github-script@v7
        with:
          script: |
            const prNumber = Number('${{ steps.decide.outputs.pr }}');
            // Tell the author how to get a trusted review instead of failing
            // silently.
            const body =
              `🔒 This is a fork PR. For security, secrets are not available in PR jobs.\n\n` +
              `To run Claude reviews with full checkout, use **Actions → Claude Trusted Review (manual)** on PR #${prNumber}, or approve a maintainer-run workflow.`;
            await github.rest.issues.createComment({
              owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, body
            });
      # NOTE(review): as with the skip step above, `exit 0` only ends this
      # step — it does not terminate the job. The fork path is actually
      # enforced by the `internal == 'true'` guards on every later step.
      - name: Exit on forks
        if: steps.decide.outputs.internal != 'true'
        run: exit 0
      # Check out the exact PR head commit (not the merge ref).
      - name: Checkout PR HEAD
        if: steps.decide.outputs.internal == 'true' && (steps.haslabel.outputs.has == 'true' || github.event_name == 'workflow_dispatch')
        uses: actions/checkout@v4
        with:
          ref: ${{ steps.decide.outputs.sha }}
          # Full history so the scope script can diff base..head.
          fetch-depth: 0
          persist-credentials: false # defensive; we're not pushing
      # Run the repo's scope generator. It diffs INPUT_BASE..INPUT_HEAD and
      # writes ring0.json / ring1.json / scope-summary.json under OUTPUT_DIR
      # (presumably keyed by run id — the next step reads
      # .github/claude-cache/<run_id>/; verify against the script).
      - name: Compute review scope (Ring 0 / Ring 1)
        if: steps.decide.outputs.internal == 'true' && (steps.haslabel.outputs.has == 'true' || github.event_name == 'workflow_dispatch')
        id: scope
        run: |
          node scripts/claude/generate-review-scope.mjs
        env:
          INPUT_BASE: ${{ steps.decide.outputs.base_sha || '' }}
          INPUT_HEAD: ${{ steps.decide.outputs.sha || '' }}
          OUTPUT_DIR: .github/claude-cache
          RING1_MAX: 200
- name: Apply sparse checkout scope
if: steps.decide.outputs.internal == 'true' && (steps.haslabel.outputs.has == 'true' || github.event_name == 'workflow_dispatch')
id: sparse
shell: bash
run: |
set -euo pipefail
SCOPE_DIR=".github/claude-cache/${{ github.run_id }}"
RING0_JSON_PATH="$SCOPE_DIR/ring0.json"
RING1_JSON_PATH="$SCOPE_DIR/ring1.json"
SUMMARY_PATH="$SCOPE_DIR/scope-summary.json"
if [ ! -f "$SUMMARY_PATH" ]; then
echo "scope-summary.json missing; skipping sparse checkout"
exit 0
fi
ring0_json=$(jq -c '.' "$RING0_JSON_PATH")
ring1_json=$(jq -c '.' "$RING1_JSON_PATH")
# Normalize .js -> .ts for source paths; keeps everything JSON-safe
ring1_json=$(jq -c 'map(if endswith(".js") then sub("\\.js$"; ".ts") else . end)' <<< "$ring1_json")
fallback=$(jq -r '.fallback // false' "$SUMMARY_PATH")
printf 'RING0_JSON=%s\n' "$ring0_json" >> "$GITHUB_ENV"
printf 'RING1_JSON=%s\n' "$ring1_json" >> "$GITHUB_ENV"
printf 'RING_SCOPE_FALLBACK=%s\n' "$fallback" >> "$GITHUB_ENV"
ring0_count=$(jq -r '.ring0Count' "$SUMMARY_PATH")
ring1_count=$(jq -r '.ring1Count' "$SUMMARY_PATH")
base_ref=$(jq -r '.baseRef // empty' "$SUMMARY_PATH")
head_ref=$(jq -r '.headRef // empty' "$SUMMARY_PATH")
{
echo "📂 Claude scope: Ring 0 = ${ring0_count}, Ring 1 = ${ring1_count}, fallback=${fallback}";
if [ -n "$base_ref" ] || [ -n "$head_ref" ]; then
echo "Base: ${base_ref:-unknown} → Head: ${head_ref:-unknown}";
fi
} | tee -a "$GITHUB_STEP_SUMMARY"
# Prepare sparse checkout patterns (include limited defaults for workflow internals)
tmpfile=$(mktemp)
{
echo '/.github/**'
echo '/scripts/claude/**'
jq -r '.[]' "$RING0_JSON_PATH"
jq -r '.[]' "$RING1_JSON_PATH"
} | sed '/^\s*$/d' | sort -u > "$tmpfile"
if [ ! -s "$tmpfile" ]; then
echo "No scope paths detected; skipping sparse checkout"
rm -f "$tmpfile"
exit 0
fi
# Init with sparse index for performance
git sparse-checkout init --sparse-index
# Silence the sparse index expanding hint for cleaner logs
git config --local advice.sparseIndexExpanded false || true
# Use non-cone mode so individual file patterns are honored
if git sparse-checkout set -h 2>&1 | grep -q -- '--no-cone'; then
git sparse-checkout set --no-cone --stdin < "$tmpfile"
else
# Legacy fallback: disable cone via config, then set patterns
git config --local core.sparseCheckoutCone false
git sparse-checkout set --stdin < "$tmpfile"
fi
# Sanity check
echo "Sparse checkout patterns applied:"
git sparse-checkout list || true
rm -f "$tmpfile"
      # Standard review pass (Sonnet): runs for the claude:review label or a
      # matching manual dispatch. Tools are restricted to read-only access;
      # the prompt content below is a runtime string and must not be edited
      # casually.
      - name: Claude Review (Sonnet)
        if: steps.decide.outputs.internal == 'true' && (steps.haslabel.outputs.has_review == 'true' || (github.event_name == 'workflow_dispatch' && steps.resolve.outputs.label == 'claude:review'))
        id: claude_sonnet
        timeout-minutes: 12
        uses: anthropics/claude-code-action@v1
        with:
          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
          github_token: ${{ github.token }}
          # Turn budget scales with diff size (computed by the `turns` step).
          claude_args: >-
            --model claude-sonnet-4-5
            --max-turns ${{ steps.turns.outputs.sonnet_turns }}
            --allowed-tools Read,Glob,Grep
            --output-format stream-json
          prompt: |
            IMPORTANT EXECUTION RULES
            - Do NOT run `npm install` or add dependencies; if tests/lint aren't available, skip them.
            - By turn ${{ steps.turns.outputs.sonnet_cutoff }}, STOP using tools and write the final Markdown report.
            - Always emit a single final report with sections:
            Summary • Top Findings • Suggested Tests • Quick Refactors • Perf/Sec Notes • Risk Level
            - End the report with the exact marker: END-OF-REPORT
            - Start the report with the heading `# PR Review Report`.
            - Respect the scope rules below. If you cannot remain in scope, say so explicitly.
            TOOLS POLICY (hard rule)
            - Use only: Read, Glob, Grep. Do NOT use Bash, Web*, NotebookEdit, or TodoWrite.
            - If a command/build/test is needed, SKIP it and write the report instead.
            - If you can't find a symbol after 3 tool calls, stop tool use and write the report.
            SCOPE RULES
            - Ring 0: Files changed in the PR (JSON supplied separately); findings must cite Ring 0 paths only.
            - Ring 1: Neighbor/test/import context only. Use Ring 1 to reason, never as independent findings.
            - Ignore absolute/tsconfig-path imports. Stay within the sparse checkout tree.
            - If Ring 1 is unavailable, work with Ring 0 alone rather than expanding scope.
            - Current fallback flag: ${{ env.RING_SCOPE_FALLBACK }}
            PATH RESOLUTION RULES
            - Source files use ESM imports like "../foo.js" but live on disk as "../foo.ts".
            - When a Read(...) of a "*.js" path fails, immediately retry the same path with ".ts".
            - Never treat missing "*.js" as missing code if a ".ts" twin exists.
            Changed files (Ring 0 JSON):
            ${{ env.RING0_JSON }}
            Context files (Ring 1 JSON):
            ${{ env.RING1_JSON }}
            Perform a PR review with full working tree access.
            Enforce:
            ## Code Quality Standards
            ### Single Responsibility Principle (SRP)
            - One class/module → one reason to change. Detect mixed concerns, many dependencies, multiple change reasons.
            ### Size Limits & Refactoring Triggers
            - Functions: ≤30–40 lines; Classes/Files: ≤500 lines; Methods per class: ≤20–30.
            - Trigger refactor on complexity or mixed concerns.
            ### Refactoring Best Practices
            - Small steps; test each change. Don't mix refactors with bug fixes.
            - Deduplicate first; add focused tests.
            ### Universal Coding Standards
            - TypeScript: prefer Record<string, unknown> over any
            - PascalCase (classes/interfaces), camelCase (functions/variables)
            - Imports: node → external → internal; remove unused
            - Commits: conventional or repo override; keep #issue linkage
            Output Markdown:
            - **Summary**
            - **Top Findings** (with code snippets/diffs)
            - **Suggested Tests** (file + test name)
            - **Quick Refactors** (incremental, SRP-aligned)
            - **Perf/Sec Notes** (explicit quick wins)
            - **Risk Level** with rationale
      # Deep review pass (Opus): runs for the claude:ultra label or a matching
      # manual dispatch. Mirrors the Sonnet step with a larger turn budget and
      # an extra "ultra" instruction at the end of the prompt.
      - name: Claude Review (Opus)
        if: steps.decide.outputs.internal == 'true' && (steps.haslabel.outputs.has_ultra == 'true' || (github.event_name == 'workflow_dispatch' && steps.resolve.outputs.label == 'claude:ultra'))
        id: claude_opus
        timeout-minutes: 15
        uses: anthropics/claude-code-action@v1
        with:
          claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
          github_token: ${{ github.token }}
          # Turn budget scales with diff size (computed by the `turns` step).
          claude_args: >-
            --model claude-opus-4-1-20250805
            --max-turns ${{ steps.turns.outputs.opus_turns }}
            --allowed-tools Read,Glob,Grep
            --output-format stream-json
          prompt: |
            IMPORTANT EXECUTION RULES
            - Do NOT run `npm install` or add dependencies; if tests/lint aren't available, skip them.
            - By turn ${{ steps.turns.outputs.opus_cutoff }}, STOP using tools and write the final Markdown report.
            - Always emit a single final report with sections:
            Summary • Top Findings • Suggested Tests • Quick Refactors • Perf/Sec Notes • Risk Level
            - End the report with the exact marker: END-OF-REPORT
            - Start the report with the heading `# PR Review Report`.
            - Respect the scope rules below. If you cannot remain in scope, say so explicitly.
            TOOLS POLICY (hard rule)
            - Use only: Read, Glob, Grep. Do NOT use Bash, Web*, NotebookEdit, or TodoWrite.
            - If a command/build/test is needed, SKIP it and write the report instead.
            - If you can't find a symbol after 3 tool calls, stop tool use and write the report.
            SCOPE RULES
            - Ring 0: Files changed in the PR (JSON supplied separately); findings must cite Ring 0 paths only.
            - Ring 1: Neighbor/test/import context only. Use Ring 1 to reason, never as independent findings.
            - Ignore absolute/tsconfig-path imports. Stay within the sparse checkout tree.
            - If Ring 1 is unavailable, work with Ring 0 alone rather than expanding scope.
            - Current fallback flag: ${{ env.RING_SCOPE_FALLBACK }}
            PATH RESOLUTION RULES
            - Source files use ESM imports like "../foo.js" but live on disk as "../foo.ts".
            - When a Read(...) of a "*.js" path fails, immediately retry the same path with ".ts".
            - Never treat missing "*.js" as missing code if a ".ts" twin exists.
            Changed files (Ring 0 JSON):
            ${{ env.RING0_JSON }}
            Context files (Ring 1 JSON):
            ${{ env.RING1_JSON }}
            Perform a PR review with full working tree access.
            Enforce:
            ## Code Quality Standards
            ### Single Responsibility Principle (SRP)
            - One class/module → one reason to change. Detect mixed concerns, many dependencies, multiple change reasons.
            ### Size Limits & Refactoring Triggers
            - Functions: ≤30–40 lines; Classes/Files: ≤500 lines; Methods per class: ≤20–30.
            - Trigger refactor on complexity or mixed concerns.
            ### Refactoring Best Practices
            - Small steps; test each change. Don't mix refactors with bug fixes.
            - Deduplicate first; add focused tests.
            ### Universal Coding Standards
            - TypeScript: prefer Record<string, unknown> over any
            - PascalCase (classes/interfaces), camelCase (functions/variables)
            - Imports: node → external → internal; remove unused
            - Commits: conventional or repo override; keep #issue linkage
            Output Markdown:
            - **Summary**
            - **Top Findings** (with code snippets/diffs)
            - **Suggested Tests** (file + test name)
            - **Quick Refactors** (incremental, SRP-aligned)
            - **Perf/Sec Notes** (explicit quick wins)
            - **Risk Level** with rationale
            If mode is **ultra**, go deeper on security, performance, concurrency, error-handling, and propose minimal diffs.
      # Parse the Sonnet action's stream-json execution log into a single
      # Markdown report and export it as RESULT_SONNET for the posting step.
      # Nothing is exported unless a real "# PR Review Report" header is found.
      - name: Capture Sonnet Review Output
        if: steps.claude_sonnet.outcome == 'success' && steps.claude_sonnet.outputs.execution_file != ''
        uses: actions/github-script@v7
        env:
          EXEC_FILE: ${{ steps.claude_sonnet.outputs.execution_file }}
        with:
          script: |
            const fs = require('fs');
            const execFile = process.env.EXEC_FILE;
            if (!execFile || !fs.existsSync(execFile)) {
              core.info('No Sonnet execution file to parse.');
              return;
            }
            const raw = fs.readFileSync(execFile, 'utf8');
            // Pass 1: treat the file as NDJSON and collect streamed text
            // fragments (delta.text takes priority over plain text nodes).
            const deltas = [];
            for (const line of raw.split(/\r?\n/)) {
              const trimmed = line.trim();
              if (!trimmed) continue;
              try {
                const parsed = JSON.parse(trimmed);
                if (parsed?.delta?.text) {
                  deltas.push(parsed.delta.text);
                } else if (typeof parsed?.text === 'string') {
                  deltas.push(parsed.text);
                }
              } catch {}
            }
            let review = deltas.join('');
            // Normalize newline before header for consistent parsing
            const idx = review.search(/#[ \t]*PR Review Report\b/i);
            if (idx > 0 && review[idx - 1] !== '\n') {
              review = review.slice(0, idx) + '\n' + review.slice(idx);
            }
            // Pass 2 (fallback): if NDJSON parsing yielded nothing, scrape
            // "text": "..." values out of the raw file with a regex and
            // un-escape the common JSON escapes.
            if (!review) {
              const regex = /"text"\s*:\s*"((?:\\.|[^"\r\n])*)"/g;
              const pieces = [];
              let match;
              while ((match = regex.exec(raw)) !== null) {
                const text = match[1]
                  .replace(/\\n/g, '\n')
                  .replace(/\\"/g, '"')
                  .replace(/\\\\/g, '\\');
                pieces.push(text);
              }
              review = pieces.join('');
              // Also normalize for fallback regex extraction
              const fallbackIdx = review.search(/#[ \t]*PR Review Report\b/i);
              if (fallbackIdx > 0 && review[fallbackIdx - 1] !== '\n') {
                review = review.slice(0, fallbackIdx) + '\n' + review.slice(fallbackIdx);
              }
            }
            // Cut the text down to [report header .. END-OF-REPORT), dropping
            // any process narration around it.
            const trimReport = (input) => {
              if (!input) return '';
              // Find the first markdown header with "PR Review Report" anywhere in the string
              const m = input.match(/#[ \t]*PR Review Report\b/i); // no newline required
              if (m && typeof m.index === 'number') input = input.slice(m.index);
              // Fallback: if a hash exists before any newline but no match above, still cut to first '#'
              if (!m) {
                const firstHash = input.indexOf('#');
                if (firstHash > -1) input = input.slice(firstHash);
              }
              const endIndex = input.indexOf('END-OF-REPORT');
              if (endIndex !== -1) input = input.slice(0, endIndex);
              return input.trim();
            };
            review = trimReport(review);
            if (!review) {
              core.warning('No Sonnet review content found after trimming.');
              return;
            }
            // Require a proper report header; otherwise don't export anything
            if (!/^#\s*PR Review Report\b/m.test(review)) {
              core.warning('No PR Review header detected; skipping RESULT_SONNET export.');
              return;
            }
            core.exportVariable('RESULT_SONNET', review);
            core.info(`Captured Sonnet review content (${review.length} chars).`);
      # Parse the Opus stream-json execution log into RESULT_OPUS.
      # NOTE(review): this script is a line-for-line duplicate of the Sonnet
      # capture step above (only names differ) — a candidate for extraction
      # into a shared scripts/claude/ helper.
      - name: Capture Opus Review Output
        if: steps.claude_opus.outcome == 'success' && steps.claude_opus.outputs.execution_file != ''
        uses: actions/github-script@v7
        env:
          EXEC_FILE: ${{ steps.claude_opus.outputs.execution_file }}
        with:
          script: |
            const fs = require('fs');
            const execFile = process.env.EXEC_FILE;
            if (!execFile || !fs.existsSync(execFile)) {
              core.info('No Opus execution file to parse.');
              return;
            }
            const raw = fs.readFileSync(execFile, 'utf8');
            // Pass 1: NDJSON — collect streamed text fragments.
            const deltas = [];
            for (const line of raw.split(/\r?\n/)) {
              const trimmed = line.trim();
              if (!trimmed) continue;
              try {
                const parsed = JSON.parse(trimmed);
                if (parsed?.delta?.text) {
                  deltas.push(parsed.delta.text);
                } else if (typeof parsed?.text === 'string') {
                  deltas.push(parsed.text);
                }
              } catch {}
            }
            let review = deltas.join('');
            // Normalize newline before header for consistent parsing
            const idx = review.search(/#[ \t]*PR Review Report\b/i);
            if (idx > 0 && review[idx - 1] !== '\n') {
              review = review.slice(0, idx) + '\n' + review.slice(idx);
            }
            // Pass 2 (fallback): regex-scrape "text" values from the raw log.
            if (!review) {
              const regex = /"text"\s*:\s*"((?:\\.|[^"\r\n])*)"/g;
              const pieces = [];
              let match;
              while ((match = regex.exec(raw)) !== null) {
                const text = match[1]
                  .replace(/\\n/g, '\n')
                  .replace(/\\"/g, '"')
                  .replace(/\\\\/g, '\\');
                pieces.push(text);
              }
              review = pieces.join('');
              // Also normalize for fallback regex extraction
              const fallbackIdx = review.search(/#[ \t]*PR Review Report\b/i);
              if (fallbackIdx > 0 && review[fallbackIdx - 1] !== '\n') {
                review = review.slice(0, fallbackIdx) + '\n' + review.slice(fallbackIdx);
              }
            }
            // Trim to [report header .. END-OF-REPORT).
            const trimReport = (input) => {
              if (!input) return '';
              // Find the first markdown header with "PR Review Report" anywhere in the string
              const m = input.match(/#[ \t]*PR Review Report\b/i); // no newline required
              if (m && typeof m.index === 'number') input = input.slice(m.index);
              // Fallback: if a hash exists before any newline but no match above, still cut to first '#'
              if (!m) {
                const firstHash = input.indexOf('#');
                if (firstHash > -1) input = input.slice(firstHash);
              }
              const endIndex = input.indexOf('END-OF-REPORT');
              if (endIndex !== -1) input = input.slice(0, endIndex);
              return input.trim();
            };
            review = trimReport(review);
            if (!review) {
              core.warning('No Opus review content found after trimming.');
              return;
            }
            // Require a proper report header; otherwise don't export anything
            if (!/^#\s*PR Review Report\b/m.test(review)) {
              core.warning('No PR Review header detected; skipping RESULT_OPUS export.');
              return;
            }
            core.exportVariable('RESULT_OPUS', review);
            core.info(`Captured Opus review content (${review.length} chars).`);
      # Diagnostics only: report how much review text was captured and whether
      # the raw execution files exist (RESULT_* come from exportVariable in
      # the capture steps above).
      - name: Debug review env
        run: |
          echo "RESULT_SONNET chars:" $(printf %s "$RESULT_SONNET" | wc -c)
          echo "RESULT_OPUS chars:" $(printf %s "$RESULT_OPUS" | wc -c)
          ls -l "${{ steps.claude_sonnet.outputs.execution_file }}" 2>/dev/null || true
          ls -l "${{ steps.claude_opus.outputs.execution_file }}" 2>/dev/null || true
      # Post (or update) the review comment. Handles: retries on 5xx, fallback
      # extraction from the raw execution file, scope filtering against Ring 0,
      # same-SHA comment updating, and chunking past the comment size limit.
      - name: Post PR review (robust)
        if: always() && steps.decide.outputs.internal == 'true' && (steps.haslabel.outputs.has == 'true' || github.event_name == 'workflow_dispatch')
        uses: actions/github-script@v7
        env:
          EXEC_FILE_SONNET: ${{ steps.claude_sonnet.outputs.execution_file }}
          EXEC_FILE_OPUS: ${{ steps.claude_opus.outputs.execution_file }}
        with:
          script: |
            const fs = require('fs');
            // GitHub issue comments cap at 65536 chars; keep headroom for markers.
            const LIMIT = 65000;
            const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));
            // Retry wrapper: retries only on missing status or 5xx, with
            // linear backoff (baseDelayMs * attempt).
            async function withRetries(fn, label, { attempts = 3, baseDelayMs = 1000 } = {}) {
              let lastError;
              for (let attempt = 1; attempt <= attempts; attempt++) {
                try {
                  return await fn();
                } catch (error) {
                  lastError = error;
                  const status = error?.status || error?.response?.status;
                  const retryable = !status || status >= 500;
                  if (!retryable || attempt === attempts) {
                    throw error;
                  }
                  const delay = baseDelayMs * attempt;
                  core.warning(`Attempt ${attempt} for ${label} failed (${error.message || error}). Retrying in ${delay}ms...`);
                  await sleep(delay);
                }
              }
              throw lastError;
            }
            // Event-agnostic PR number detection
            const prNumber = Number('${{ steps.resolve.outputs.pr }}');
            if (!prNumber) {
              core.info('No PR number found; skipping comment posting.');
              return;
            }
            const headSha = '${{ steps.decide.outputs.sha }}';
            // runTag identifies this run; marker identifies the reviewed SHA
            // and is how prior Claude comments are recognized.
            const runTag = `<!-- claude-run-${context.runId} -->`;
            const marker = `<!-- claude-review sha=${headSha} -->`;
            const comments = await withRetries(() =>
              github.paginate(github.rest.issues.listComments, {
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: prNumber,
                per_page: 100
              }),
              'github.paginate issues.listComments'
            );
            // Prefer updating the most recent Claude comment (if any)
            let prior = null;
            for (const c of [...comments].reverse()) {
              const body = c.body || '';
              if (c.user?.type === 'Bot' && /<!--\s*claude-review\s+sha=/.test(body)) {
                prior = c;
                break;
              }
            }
            // Same SHA → update in place; different SHA → new comment.
            let priorSha = null;
            let sameSha = false;
            if (prior) {
              const priorShaMatch = (prior.body || '').match(/<!--\s*claude-review\s+sha=([^\s>]+)\s*-->/);
              priorSha = priorShaMatch ? priorShaMatch[1] : null;
              sameSha = priorSha === headSha;
              core.info(`Prior Claude review SHA=${priorSha || 'none'}, current SHA=${headSha}, sameSha=${sameSha}`);
            } else {
              core.info('No prior Claude review comment found.');
            }
            // Prefer the deeper Opus pass if both exist
            let body = (process.env.RESULT_OPUS || process.env.RESULT_SONNET || '').trim();
            // Robust extractor: handles delta.text, text nodes, arrays, messages, etc.
            function extractText(node, out) {
              if (!node) return;
              if (Array.isArray(node)) { node.forEach(n => extractText(n, out)); return; }
              if (typeof node === 'object') {
                // Anthropic stream deltas - prefer this over plain text
                if (node.delta && typeof node.delta.text === 'string') {
                  out.push(node.delta.text);
                  // Don't process plain text if we found delta.text to avoid duplication
                } else {
                  // Plain text nodes (only if no delta.text was found)
                  if (node.type === 'text' && typeof node.text === 'string') out.push(node.text);
                  // Generic: capture any string-valued "text" (only if no delta)
                  else if (Object.prototype.hasOwnProperty.call(node, 'text') && typeof node.text === 'string') out.push(node.text);
                }
                // Recurse into nested structures (but skip delta to avoid double-processing)
                for (const k of ['content','turns','messages','data','message']) {
                  if (node[k] !== undefined) extractText(node[k], out);
                }
                // Only recurse into delta if it doesn't have text (to avoid reprocessing delta.text)
                if (node.delta && !node.delta.text) extractText(node.delta, out);
              }
            }
            // Fallback: the capture steps produced nothing — re-parse the raw
            // execution file directly.
            if (!body) {
              const execFile = process.env.EXEC_FILE_SONNET || process.env.EXEC_FILE_OPUS;
              if (execFile && fs.existsSync(execFile)) {
                try {
                  const raw = fs.readFileSync(execFile, 'utf8').trim();
                  const chunks = [];
                  // First try NDJSON (stream JSON)
                  for (const line of raw.split(/\r?\n/)) {
                    const s = line.trim();
                    if (!s) continue;
                    try {
                      extractText(JSON.parse(s), chunks);
                    } catch {}
                  }
                  // Fallback: single JSON blob
                  if (!chunks.length) {
                    try { extractText(JSON.parse(raw), chunks); } catch {}
                  }
                  // Join deltas without injecting extra newlines
                  body = chunks.join('').trim();
                  // Normalize newline before header for consistent parsing
                  const fallbackIdx = body.search(/#[ \t]*PR Review Report\b/i);
                  if (fallbackIdx > 0 && body[fallbackIdx - 1] !== '\n') {
                    body = body.slice(0, fallbackIdx) + '\n' + body.slice(fallbackIdx);
                  }
                  // Cut at the sentinel if present
                  const end = body.indexOf('END-OF-REPORT');
                  if (end !== -1) body = body.slice(0, end);
                } catch (e) {
                  core.warning(`Failed to parse execution file: ${e.message}`);
                }
              }
            }
            if (!body) {
              core.info('No review content found; skipping comment posting.');
              return; // don't post placeholders
            }
            // Same trim contract as the capture steps: [header .. END-OF-REPORT).
            const trimReport = (input) => {
              if (!input) return '';
              // Find the first markdown header with "PR Review Report" anywhere in the string
              const m = input.match(/#[ \t]*PR Review Report\b/i); // no newline required
              if (m && typeof m.index === 'number') input = input.slice(m.index);
              // Fallback: if a hash exists before any newline but no match above, still cut to first '#'
              if (!m) {
                const firstHash = input.indexOf('#');
                if (firstHash > -1) input = input.slice(firstHash);
              }
              const endIndex = input.indexOf('END-OF-REPORT');
              if (endIndex !== -1) input = input.slice(0, endIndex);
              return input.trim();
            };
            body = trimReport(body);
            // Don't post unless it's a real report
            if (!/^#\s*PR Review Report\b/m.test(body)) {
              core.info('No final report detected; skipping comment to avoid posting process narration.');
              return;
            }
            // Scope filter: drop report lines that cite path:line references
            // outside Ring 0; keep up to 3 of them as an appendix.
            const ring0Set = new Set();
            try {
              JSON.parse(process.env.RING0_JSON || '[]').forEach((file) => {
                if (typeof file === 'string' && file) ring0Set.add(file);
              });
            } catch (error) {
              core.warning(`Failed to parse RING0_JSON: ${error.message}`);
            }
            const outOfScope = [];
            if (ring0Set.size) {
              const filteredLines = [];
              for (const line of body.split(/\n/)) {
                const match = line.match(/([A-Za-z0-9_./\\-]+\.[A-Za-z0-9]+):\d/);
                if (match) {
                  const rawPath = match[1].replace(/[)\]]+$/, '');
                  let normalizedPath = rawPath.replace(/\\/g, '/');
                  if (normalizedPath.startsWith('./')) normalizedPath = normalizedPath.slice(2);
                  if (!ring0Set.has(normalizedPath)) {
                    if (outOfScope.length < 3) outOfScope.push(line.trim());
                    continue;
                  }
                }
                filteredLines.push(line);
              }
              body = filteredLines.join('\n').trim();
            }
            if (!body) {
              core.info('Review content empty after scope filtering; skipping comment posting.');
              return;
            }
            if (outOfScope.length) {
              body = `${body}\n\n### Optional, out-of-scope notes\n- ${outOfScope.join('\n- ')}`;
              core.info(`Out-of-scope notes bucketed: ${outOfScope.length}`);
              if (core?.summary?.addRaw) {
                await core.summary.addRaw(`Claude scope filtering: out-of-scope notes bucketed = ${outOfScope.length}\n`).write();
              }
            }
            // Compose final message with fresh markers
            const finalBody = `${marker}\n${body}\n\n${runTag}`;
            if (finalBody.length <= LIMIT) {
              if (prior && sameSha) {
                await withRetries(() => github.rest.issues.updateComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  comment_id: prior.id,
                  body: finalBody
                }), 'github.rest.issues.updateComment (final single)');
                core.info('Updated existing Claude review comment (same SHA).');
              } else {
                await withRetries(() => github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: prNumber,
                  body: finalBody
                }), 'github.rest.issues.createComment (final single)');
                core.info(prior ? 'Created new Claude review comment (SHA changed).' : 'Posted new Claude review comment.');
              }
            } else {
              // Chunk: update the first (or create it), then add append-only parts
              const parts = [];
              for (let i = 0; i < body.length; i += LIMIT) parts.push(body.slice(i, i + LIMIT));
              const first = `${marker}\n${parts[0]}\n\n<!-- claude-run-${context.runId}-0/${parts.length} -->`;
              if (prior && sameSha) {
                await withRetries(() => github.rest.issues.updateComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  comment_id: prior.id,
                  body: first
                }), 'github.rest.issues.updateComment (chunk base)');
              } else {
                await withRetries(() => github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: prNumber,
                  body: first
                }), 'github.rest.issues.createComment (chunk base)');
              }
              if (!(prior && sameSha)) {
                core.info(prior ? `Created new Claude review (chunked) base comment due to SHA change. Total parts=${parts.length}.` : `Posted new Claude review comment (chunked). Total parts=${parts.length}.`);
              } else {
                core.info(`Updated existing Claude review comment (chunked, same SHA). Total parts=${parts.length}.`);
              }
              for (let i = 1; i < parts.length; i++) {
                const piece = `${marker}\n${parts[i]}\n\n<!-- claude-run-${context.runId}-${i}/${parts.length} -->`;
                await withRetries(() => github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: prNumber,
                  body: piece
                }), `github.rest.issues.createComment (chunk ${i})`);
              }
              if (parts.length > 1) {
                core.info(`Posted PR review in ${parts.length} parts.`);
              }
            }
# Remove claude label after review completes to prevent auto re-runs on subsequent pushes
- name: Remove claude label (prevents auto re-run)
if: always() && steps.decide.outputs.internal == 'true'
uses: actions/github-script@v7
with:
script: |
const prNumber = Number('${{ steps.resolve.outputs.pr }}');
// Check which label was applied by looking at completed steps
const sonnetRan = '${{ steps.claude_sonnet.outcome }}' !== '';
const opusRan = '${{ steps.claude_opus.outcome }}' !== '';
const labelsToRemove = [];
if (sonnetRan) labelsToRemove.push('claude:review');
if (opusRan) labelsToRemove.push('claude:ultra');
for (const label of labelsToRemove) {
try {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: prNumber,
name: label
});
core.info(`Removed ${label} label to prevent auto re-runs on future pushes.`);
} catch (e) {
if (e.status === 404) {
core.info(`Label ${label} was already removed.`);
} else {
core.warning(`Failed to remove label ${label}: ${e.message}`);
}
}
}