apply_fix
Apply a previewed SQL fix to a Supabase project after confirmation. Re-runs the security audit to verify the finding is resolved.
Instructions
ACTUALLY APPLY a fix SQL to the project. Requires confirm=true. Always run preview_fix first. Re-runs audit afterward to verify the finding is gone.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| project_ref | Yes | Supabase project reference ID (the target project). | |
| finding_index | Yes | Zero-based index of the finding in the most recent cached audit result. | |
| confirm | Yes | Must be true to actually apply. Set to false to abort. |
Implementation Reference
- src/server.js:123-167 (registration)The 'apply_fix' tool is registered using server.registerTool with name 'apply_fix'. It expects inputSchema with project_ref (string), finding_index (number), and confirm (boolean). The handler validates confirm=true, looks up the finding from cache, extracts SQL, executes it via the sql() helper, re-audits to verify, and returns the result.
server.registerTool( "apply_fix", { description: "ACTUALLY APPLY a fix SQL to the project. Requires confirm=true. Always run preview_fix first. Re-runs audit afterward to verify the finding is gone.", inputSchema: { project_ref: z.string(), finding_index: z.number().int(), confirm: z.boolean().describe("Must be true to actually apply. Set to false to abort."), }, }, async ({ project_ref, finding_index, confirm }) => { if (!confirm) { return { content: [{ type: "text", text: "Aborted: confirm is false. Pass confirm=true to actually apply." }] }; } const c = cache.get(project_ref); if (!c) return { content: [{ type: "text", text: `No cached audit. Run audit_project first.` }], isError: true }; const f = c.result.findings[finding_index]; if (!f) return { content: [{ type: "text", text: `Finding index ${finding_index} out of range` }], isError: true }; const sqlOnly = f.fix_sql.split("\n").filter((l) => l.trim() && !l.trim().startsWith("--")).join("\n"); if (!sqlOnly) { return { content: [{ type: "text", text: `Finding "${f.title}" requires a Dashboard change. Cannot apply via SQL.\n\n${f.fix_sql}` }] }; } try { await sql(c.token, project_ref, sqlOnly); // Re-audit to verify const fresh = await audit(c.token, project_ref); cache.set(project_ref, { result: fresh, ts: Date.now(), token: c.token }); const stillThere = fresh.findings.some( (nf) => nf.check === f.check && nf.target === f.target ); return { content: [ { type: "text", text: stillThere ? `Applied SQL but finding still present after re-audit. Verify manually.` : `Applied. Re-audit confirms finding "${f.title}" on ${f.target} is gone. 
New summary: ${shortSummary(fresh)}` }, { type: "text", text: `SQL applied:\n\`\`\`sql\n${sqlOnly}\n\`\`\`` }, ], }; } catch (e) { return { content: [{ type: "text", text: `Apply FAILED: ${e.message}\n\nProject state unchanged.` }], isError: true }; } } ); - src/server.js:133-166 (handler)The handler function (the async callback passed to registerTool) that implements the core logic of apply_fix. It checks confirm=true, retrieves the finding from cache, filters out comment-only lines from fix_sql, executes the SQL, re-runs audit() to verify the fix worked, and returns success/failure messages.
// apply_fix handler: aborts unless confirm=true; looks up the finding in the
// cached audit, filters comment-only lines out of fix_sql (empty result means
// a Dashboard-only fix), executes the SQL, then re-runs audit() and reports
// whether a finding with the same check+target is still present.
async ({ project_ref, finding_index, confirm }) => { if (!confirm) { return { content: [{ type: "text", text: "Aborted: confirm is false. Pass confirm=true to actually apply." }] }; } const c = cache.get(project_ref); if (!c) return { content: [{ type: "text", text: `No cached audit. Run audit_project first.` }], isError: true }; const f = c.result.findings[finding_index]; if (!f) return { content: [{ type: "text", text: `Finding index ${finding_index} out of range` }], isError: true }; const sqlOnly = f.fix_sql.split("\n").filter((l) => l.trim() && !l.trim().startsWith("--")).join("\n"); if (!sqlOnly) { return { content: [{ type: "text", text: `Finding "${f.title}" requires a Dashboard change. Cannot apply via SQL.\n\n${f.fix_sql}` }] }; } try { await sql(c.token, project_ref, sqlOnly); // Re-audit to verify const fresh = await audit(c.token, project_ref); cache.set(project_ref, { result: fresh, ts: Date.now(), token: c.token }); const stillThere = fresh.findings.some( (nf) => nf.check === f.check && nf.target === f.target ); return { content: [ { type: "text", text: stillThere ? `Applied SQL but finding still present after re-audit. Verify manually.` : `Applied. Re-audit confirms finding "${f.title}" on ${f.target} is gone. New summary: ${shortSummary(fresh)}` }, { type: "text", text: `SQL applied:\n\`\`\`sql\n${sqlOnly}\n\`\`\`` }, ], }; } catch (e) { return { content: [{ type: "text", text: `Apply FAILED: ${e.message}\n\nProject state unchanged.` }], isError: true }; } } - src/server.js:127-131 (schema)Input schema for apply_fix using zod: project_ref (string), finding_index (number.int()), and confirm (boolean with description 'Must be true to actually apply. Set to false to abort.').
// apply_fix input schema (zod): project_ref is a string, finding_index an
// integer index into the cached findings array, confirm a required boolean gate.
inputSchema: { project_ref: z.string(), finding_index: z.number().int(), confirm: z.boolean().describe("Must be true to actually apply. Set to false to abort."), }, - src/audit.js:77-89 (helper)The sql() helper function imported in server.js from ./audit.js. It runs SQL queries against a Supabase project's database via the Management API endpoint /v1/projects/{ref}/database/query. Used by apply_fix to execute the fix SQL commands.
async function sql(token, ref, query) { const r = await fetch(`${API}/projects/${ref}/database/query`, { method: "POST", headers: { Authorization: `Bearer ${token}`, "Content-Type": "application/json", "User-Agent": UA, }, body: JSON.stringify({ query }), }); if (!r.ok) throw new Error(`SQL ${r.status}: ${await r.text()}`); return r.json(); } - src/audit.js:165-419 (helper)The audit() helper function imported in server.js from ./audit.js. It scans a Supabase project for security issues and returns findings with fix_sql. apply_fix calls audit() after applying SQL to verify the finding is resolved.
/**
 * audit(): scan a Supabase project for security issues and return a report
 * object with severity-sorted findings (each carrying fix_sql). Checks, in
 * the order they appear below:
 *   1.  public tables with anon/authenticated grants but RLS disabled, or
 *       RLS enabled with zero policies
 *   2.  SECURITY DEFINER functions executable by anon, or missing a
 *       SET search_path (path-injection vector); trigger functions skipped
 *   2b. tables in the supabase_realtime publication whose RLS is disabled
 *   3.  default privileges (pg_default_acl) still granting CRUD on future
 *       tables for the `postgres` and/or `supabase_admin` owner roles
 *   4.  public storage buckets
 *   5.  auth config: signups without email confirm, anonymous sign-ins,
 *       password_min_length < 8, CAPTCHA disabled while signups are open
 * When opts.activeProbe is true (default) and an anon key is available,
 * findings for anon-readable tables also get a live probe result.
 * NOTE(review): the long lines below are extraction-wrapped; code is kept
 * verbatim.
 */
async function audit(token, ref, opts = {}) { const { activeProbe = true } = opts; const findings = []; const meta = await getProjectMeta(token, ref); const supabaseUrl = `https://${ref}.supabase.co`; const anonKey = activeProbe ? await getAnonKey(token, ref) : null; const probeAvailable = !!anonKey; // 1. Tables: RLS status + policy count + anon grants const tables = await sql( token, ref, `SELECT c.relname AS table_name, c.relrowsecurity AS rls_enabled, (SELECT COUNT(*) FROM pg_policies p WHERE p.schemaname='public' AND p.tablename=c.relname) AS n_policies, has_table_privilege('anon', 'public.'||quote_ident(c.relname), 'SELECT') AS anon_select, has_table_privilege('anon', 'public.'||quote_ident(c.relname), 'INSERT') AS anon_insert, has_table_privilege('anon', 'public.'||quote_ident(c.relname), 'DELETE') AS anon_delete, has_table_privilege('authenticated', 'public.'||quote_ident(c.relname), 'SELECT') AS auth_select FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace WHERE n.nspname = 'public' AND c.relkind = 'r' ORDER BY c.relname;` ); for (const t of tables) { if (!t.rls_enabled && (t.anon_select || t.anon_insert || t.anon_delete)) { const finding = { check: "rls_disabled", ...CHECKS.rls_disabled, target: t.table_name, details: { anon_select: t.anon_select, anon_insert: t.anon_insert, anon_delete: t.anon_delete, }, fix_sql: `ALTER TABLE public.${t.table_name} ENABLE ROW LEVEL SECURITY;`, }; if (probeAvailable && t.anon_select) { finding.probe = await probeAnonAccess(supabaseUrl, anonKey, t.table_name); } findings.push(finding); } else if (t.rls_enabled && t.n_policies === 0 && (t.anon_select || t.auth_select)) { findings.push({ check: "rls_no_policies_with_anon_grants", ...CHECKS.rls_no_policies_with_anon_grants, target: t.table_name, details: { policies: 0, anon_select: t.anon_select, auth_select: t.auth_select }, fix_sql: `-- Optional hardening: revoke direct grants to make leak impossible even if RLS is disabled.\nREVOKE ALL ON public.${t.table_name} 
FROM anon, authenticated;`, }); } } // 2. SECURITY DEFINER functions: executable-by-anon AND missing search_path const funcs = await sql( token, ref, `SELECT p.proname AS function_name, p.prosecdef AS security_definer, pg_get_function_result(p.oid) AS return_type, has_function_privilege('anon', p.oid, 'EXECUTE') AS anon_execute, p.proconfig AS config FROM pg_proc p JOIN pg_namespace n ON n.oid = p.pronamespace WHERE n.nspname = 'public' AND p.prosecdef = true;` ); for (const f of funcs) { // Trigger functions (return trigger) are not exploitable via PostgREST if (f.return_type === "trigger") continue; if (f.anon_execute) { findings.push({ check: "function_security_definer_anon_executable", ...CHECKS.function_security_definer_anon_executable, target: f.function_name, details: { returns: f.return_type }, fix_sql: `REVOKE EXECUTE ON FUNCTION public.${f.function_name} FROM anon;`, }); } // SECURITY DEFINER without SET search_path = path-injection vector const hasSearchPath = Array.isArray(f.config) && f.config.some((c) => typeof c === "string" && c.toLowerCase().startsWith("search_path=")); if (!hasSearchPath) { findings.push({ check: "function_no_search_path", ...CHECKS.function_no_search_path, target: f.function_name, details: { returns: f.return_type, current_config: f.config }, fix_sql: `ALTER FUNCTION public.${f.function_name} SET search_path = public, pg_temp;`, }); } } // 2b. 
Realtime publication: tables exposed via supabase_realtime WebSocket let realtimeTables = []; try { realtimeTables = await sql( token, ref, `SELECT tablename FROM pg_publication_tables WHERE pubname = 'supabase_realtime' AND schemaname = 'public';` ); } catch { /* publication may not exist */ } const tableRlsMap = new Map(tables.map((t) => [t.table_name, t.rls_enabled])); for (const rt of realtimeTables) { const rls = tableRlsMap.get(rt.tablename); if (rls === false) { const finding = { check: "realtime_publication_no_rls", ...CHECKS.realtime_publication_no_rls, target: rt.tablename, details: { in_publication: "supabase_realtime", rls_enabled: false }, fix_sql: `ALTER TABLE public.${rt.tablename} ENABLE ROW LEVEL SECURITY;\n-- Or remove from publication: ALTER PUBLICATION supabase_realtime DROP TABLE public.${rt.tablename};`, }; if (probeAvailable) { finding.probe = await probeAnonAccess(supabaseUrl, anonKey, rt.tablename); } findings.push(finding); } } // 3. Default privileges still grant CRUD on future tables? // Supabase tables can be owned by either `postgres` (SQL/CLI migrations) or `supabase_admin` (Dashboard). // Check both. ACL string format: anon=arwdDxtm/owner — letters: a=insert r=select w=update d=delete D=truncate x=refs t=trigger m=maintain. 
const defaults = await sql( token, ref, `SELECT defaclrole::regrole::text AS owner_role, defaclacl::text AS acl FROM pg_default_acl d JOIN pg_namespace n ON n.oid = d.defaclnamespace WHERE n.nspname = 'public' AND d.defaclobjtype = 'r';` ); const ownersWithLeak = []; for (const ownerRole of ["postgres", "supabase_admin"]) { const row = defaults.find((d) => d.owner_role === ownerRole); if (!row) { // No custom default ACL = system grants apply (Supabase auto-grants CRUD to anon) ownersWithLeak.push(ownerRole); continue; } // Look for anon or authenticated still having CRUD letters (a=insert r=select w=update d=delete) const m = row.acl.match(/anon=([a-zA-Z]*)/); const auth = row.acl.match(/authenticated=([a-zA-Z]*)/); const hasCrud = (s) => s && /[arwd]/.test(s.replace(/[DxtmU]/g, "")); if (hasCrud(m && m[1]) || hasCrud(auth && auth[1])) { ownersWithLeak.push(ownerRole); } } if (ownersWithLeak.length > 0) { const fixes = []; if (ownersWithLeak.includes("postgres")) { fixes.push( `-- Tables created via SQL editor / migrations / CLI (owner = postgres):`, `ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE SELECT, INSERT, UPDATE, DELETE ON TABLES FROM anon, authenticated, service_role;`, `ALTER DEFAULT PRIVILEGES FOR ROLE postgres IN SCHEMA public REVOKE USAGE, SELECT ON SEQUENCES FROM anon, authenticated, service_role;`, ); } if (ownersWithLeak.includes("supabase_admin")) { fixes.push( ``, `-- Tables created via Supabase Dashboard (owner = supabase_admin) cannot be revoked from postgres role.`, `-- Toggle this in: Dashboard -> Project Settings -> Data API -> "Automatically expose new tables" = OFF`, `-- Or via Management API: PATCH /v1/projects/${ref}/postgrest with {"db_extra_search_path":"public", ...} (see docs).`, ); } findings.push({ check: "default_privileges_not_revoked", ...CHECKS.default_privileges_not_revoked, target: `schema:public (leaky owners: ${ownersWithLeak.join(", ")})`, details: { leaky_owner_roles: ownersWithLeak, note: "Supabase 
enforces revoke for all projects by Oct 30, 2026." }, fix_sql: fixes.join("\n"), }); } // 4. Public storage buckets const buckets = await getStorageBuckets(token, ref); for (const b of buckets) { if (b.public) { findings.push({ check: "storage_bucket_public", ...CHECKS.storage_bucket_public, target: `bucket:${b.name}`, details: { id: b.id }, fix_sql: `UPDATE storage.buckets SET public = false WHERE id = '${b.id}'; -- only if you don't need public CDN-style access`, }); } } // 5. Auth config — multiple checks const authCfg = await getAuthConfig(token, ref); if (authCfg) { if (authCfg.disable_signup === false && authCfg.mailer_autoconfirm === true) { findings.push({ check: "auth_signups_enabled_no_confirm", ...CHECKS.auth_signups_enabled_no_confirm, target: "auth:signups", details: { signups_enabled: true, autoconfirm: true }, fix_sql: `-- Dashboard: Auth -> Providers -> Email -> "Confirm email" = ON\n-- API: PATCH /v1/projects/${ref}/config/auth body {"mailer_autoconfirm": false}`, }); } if (authCfg.external_anonymous_users_enabled === true) { findings.push({ check: "anonymous_signins_enabled", ...CHECKS.anonymous_signins_enabled, target: "auth:anonymous", details: { external_anonymous_users_enabled: true }, fix_sql: `-- Dashboard: Auth -> Providers -> Anonymous Sign-Ins = OFF\n-- API: PATCH /v1/projects/${ref}/config/auth body {"external_anonymous_users_enabled": false}`, }); } if (typeof authCfg.password_min_length === "number" && authCfg.password_min_length < 8) { findings.push({ check: "weak_password_policy", ...CHECKS.weak_password_policy, target: "auth:password", details: { password_min_length: authCfg.password_min_length, password_required_characters: authCfg.password_required_characters }, fix_sql: `-- Dashboard: Auth -> Providers -> Email -> "Minimum password length" >= 8\n-- API: PATCH /v1/projects/${ref}/config/auth body {"password_min_length": 12, "password_required_characters": 
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()"}`, }); } if (authCfg.security_captcha_enabled === false && authCfg.disable_signup === false) { findings.push({ check: "no_captcha_on_auth", ...CHECKS.no_captcha_on_auth, target: "auth:captcha", details: { security_captcha_enabled: false }, fix_sql: `-- Dashboard: Auth -> Settings -> Enable CAPTCHA (hCaptcha or Cloudflare Turnstile)\n-- API: PATCH /v1/projects/${ref}/config/auth body {"security_captcha_enabled": true, "security_captcha_provider": "hcaptcha", "security_captcha_secret": "<your_secret>"}`, }); } } // Sort findings by severity findings.sort((a, b) => SEVERITY_ORDER[a.severity] - SEVERITY_ORDER[b.severity]); const summary = findings.reduce( (acc, f) => ({ ...acc, [f.severity]: (acc[f.severity] || 0) + 1 }), { critical: 0, high: 0, medium: 0, low: 0, info: 0 } ); const probed = findings.filter((f) => f.probe).length; const confirmed = findings.filter((f) => f.probe?.confirmed).length; return { project_ref: ref, project_name: meta.name || ref, region: meta.region || "unknown", scanned_at: new Date().toISOString(), scanned_by: "supabase-security v0.3", active_probe: { enabled: probeAvailable, probed, confirmed }, summary, n_tables_scanned: tables.length, n_functions_scanned: funcs.length, n_buckets_scanned: buckets.length, findings, }; }