/*
* PostgreSQL Read-Only MCP Server
*
 * Environment Variables:
 * - SSH_HOST: SSH bastion host for the tunnel (default: your-ssh-host.amazonaws.com)
 * - SSH_USER: SSH username (default: ec2-user)
 * - SSH_KEY_PATH: Path to the SSH private key (default: $HOME/.ssh/id_rsa)
 * - TUNNEL_PORT: Local port for the SSH tunnel (default: 5433)
 * - RDS_HOST: Remote database host (default: your-database.rds.amazonaws.com)
 * - RDS_PORT: Remote database port (default: 5432)
 * - POSTGRES_DB: Database name (default: postgres)
 * - POSTGRES_USER: Database username (default: readonly_user)
 * - POSTGRES_PASSWORD: Database password
 * - QUERY_TIMEOUT_MS: Query timeout in milliseconds (default: 15000)
*
* Security Note:
 * This server enforces read-only access by setting the session's default
 * transaction mode to read-only on every pooled connection. For defense in
 * depth, create a dedicated read-only database user:
*
* CREATE USER readonly_mcp WITH PASSWORD 'secure_password';
* GRANT CONNECT ON DATABASE your_database TO readonly_mcp;
* GRANT USAGE ON SCHEMA public TO readonly_mcp;
* GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly_mcp;
* ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly_mcp;
*/
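/*
 * Example client registration (a sketch assuming a Claude Desktop-style
 * "mcpServers" config; the command and path depend on your build output):
 *
 * {
 *   "mcpServers": {
 *     "postgres-readonly": {
 *       "command": "node",
 *       "args": ["/path/to/build/index.js"],
 *       "env": { "POSTGRES_PASSWORD": "..." }
 *     }
 *   }
 * }
 */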
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import pg from 'pg'
import fs from 'fs'
import { TunnelManager } from './tunnelManager.js'
import type {
SSHConfig,
TunnelConfig,
DBConfig,
QueryResultFormatted,
TableInfo,
TimeoutError,
} from './types.js'
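// The imported types live in types.ts and are not shown here; from their
// usage in this file they are expected to look roughly like:
//   interface SSHConfig { host: string; username: string; privateKey: Buffer }
//   interface TunnelConfig { srcPort: number; dstHost: string; dstPort: number }
//   interface TimeoutError extends Error { isTimeout: boolean; suggestion: string }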
const { Pool } = pg
// Configuration
const SSH_CONFIG: SSHConfig = {
host: process.env.SSH_HOST || 'your-ssh-host.amazonaws.com',
username: process.env.SSH_USER || 'ec2-user',
privateKey: fs.readFileSync(process.env.SSH_KEY_PATH || process.env.HOME + '/.ssh/id_rsa'),
}
const TUNNEL_CONFIG: TunnelConfig = {
srcPort: parseInt(process.env.TUNNEL_PORT || '5433'),
dstHost: process.env.RDS_HOST || 'your-database.rds.amazonaws.com',
dstPort: parseInt(process.env.RDS_PORT || '5432'),
}
const DB_CONFIG: DBConfig = {
  host: 'localhost',
  // Connect through the local end of the SSH tunnel, not directly to RDS.
  // Using TUNNEL_CONFIG.srcPort keeps this in sync with TUNNEL_PORT instead
  // of hard-coding 5433.
  port: TUNNEL_CONFIG.srcPort,
  database: process.env.POSTGRES_DB || 'postgres',
  user: process.env.POSTGRES_USER || 'readonly_user',
  password: process.env.POSTGRES_PASSWORD || '',
  ssl: {
    // The TLS certificate is issued for the RDS hostname, but we connect via
    // localhost through the tunnel, so hostname verification must be skipped
    rejectUnauthorized: false,
  },
}
let pool: pg.Pool | undefined
let tunnelManager: TunnelManager | undefined
const dbInfo: Record<string, any> = {}
// Query timeout (default 15 seconds, configurable via env)
const QUERY_TIMEOUT_MS = parseInt(process.env.QUERY_TIMEOUT_MS || '15000')
// Create or connect to shared SSH tunnel
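// The tunnel forwards localhost:TUNNEL_CONFIG.srcPort to
// TUNNEL_CONFIG.dstHost:dstPort over SSH, so the pg pool below can treat
// the remote RDS instance as a local database.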
async function setupSSHTunnel(): Promise<void> {
tunnelManager = new TunnelManager(SSH_CONFIG, TUNNEL_CONFIG)
await tunnelManager.connect()
}
// Helper function to validate queries - basic check before database-level protection
function isReadOnlyQuery(query: string): boolean {
// The real protection comes from the read-only transaction mode
// This is just a basic sanity check
let normalizedQuery = query.trim().toUpperCase()
// Skip past any leading comments
while (normalizedQuery.startsWith('--') || normalizedQuery.startsWith('/*')) {
if (normalizedQuery.startsWith('--')) {
// Skip single-line comment
const newlineIndex = normalizedQuery.indexOf('\n')
if (newlineIndex === -1) {
// Comment goes to end of query
return true // Assume safe if only comments
}
normalizedQuery = normalizedQuery.substring(newlineIndex + 1).trim()
} else if (normalizedQuery.startsWith('/*')) {
// Skip multi-line comment
const endIndex = normalizedQuery.indexOf('*/')
if (endIndex === -1) {
// Unclosed comment
return true // Assume safe
}
normalizedQuery = normalizedQuery.substring(endIndex + 2).trim()
}
}
  // Collapse remaining whitespace so multi-line commands like
  // "DELETE\nFROM ..." still match the prefixes below
  normalizedQuery = normalizedQuery.replace(/\s+/g, ' ')
  // Only block queries that explicitly start with write operations
  const definiteWriteCommands = [
'INSERT ',
'UPDATE ',
'DELETE ',
'DROP ',
'CREATE ',
'ALTER ',
'TRUNCATE ',
'GRANT ',
'REVOKE ',
'VACUUM ',
'REINDEX ',
'LOCK ',
]
for (const cmd of definiteWriteCommands) {
if (normalizedQuery.startsWith(cmd)) {
return false
}
}
return true
}
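// Illustrative results for the check above:
//   isReadOnlyQuery('SELECT 1')                      -> true
//   isReadOnlyQuery('-- note\nSELECT 1')             -> true
//   isReadOnlyQuery('WITH x AS (SELECT 1) SELECT 1') -> true
//   isReadOnlyQuery('DROP TABLE users')              -> false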
// Format result set as a table-like string
function formatResultsAsTable(rows: Record<string, any>[], fields: pg.FieldDef[]): string {
if (!rows || rows.length === 0) {
return 'No rows returned.'
}
// Get column names
const columns = fields.map((f) => f.name)
// Calculate max width for each column
const widths: Record<string, number> = {}
columns.forEach((col) => {
widths[col] = col.length
rows.forEach((row) => {
const val = String(row[col] ?? 'NULL')
widths[col] = Math.max(widths[col] ?? col.length, val.length)
})
widths[col] = Math.min(widths[col] ?? col.length, 50) // Cap column width
})
// Build the table
let table = '\n'
// Header
table += columns.map((col) => col.padEnd(widths[col] ?? col.length)).join(' | ') + '\n'
table += columns.map((col) => '-'.repeat(widths[col] ?? col.length)).join('-+-') + '\n'
// Rows
rows.forEach((row) => {
table +=
columns
.map((col) => {
const val = row[col] ?? 'NULL'
const str = String(val)
return str.length > 50
? str.substring(0, 47) + '...'
: str.padEnd(widths[col] ?? str.length)
})
.join(' | ') + '\n'
})
return table
}
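// Example of the rendered layout (each column grows to its longest value,
// capped at 50 characters):
//   id | name
//   ---+-----
//   1  | Ada
//   2  | Bo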
// Execute query with timeout
async function executeQueryWithTimeout(
  query: string,
  params: any[] = [],
  timeoutMs = QUERY_TIMEOUT_MS
): Promise<pg.QueryResult> {
  if (!pool) {
    throw new Error('Database pool not initialized')
  }
  // Get a dedicated client from the pool for better control
  const client = await pool.connect()
  try {
    // Capture this connection's backend PID up front. Queries on one client
    // run serially, so a cancel issued through the same client would only
    // execute after the slow query finished; we cancel from a separate pool
    // connection instead.
    const pidResult = await client.query('SELECT pg_backend_pid() AS pid')
    const backendPid: number = pidResult.rows[0].pid
    // Set up timeout
    let timeoutId: NodeJS.Timeout | undefined
    const timeoutPromise = new Promise<never>((_, reject) => {
      timeoutId = setTimeout(() => {
        // Cancel the running query from another connection
        pool?.query('SELECT pg_cancel_backend($1)', [backendPid]).catch(() => {})
        reject(new Error(`Query cancelled after ${timeoutMs / 1000} seconds`))
      }, timeoutMs)
    })
    try {
      // Race between query and timeout
      return await Promise.race([client.query(query, params), timeoutPromise])
    } catch (error) {
      if (error instanceof Error && error.message.includes('cancelled')) {
        // Create a helpful timeout error
        const timeoutError = new Error(
          `Query execution exceeded ${timeoutMs / 1000} second timeout and was cancelled.`
        ) as TimeoutError
        timeoutError.isTimeout = true
        timeoutError.suggestion = `This query is taking too long to execute. Please:
1. Use postgres_explain_query to analyze the query plan
2. Add appropriate indexes if needed
3. Limit the result set with a LIMIT clause
4. Optimize joins and WHERE conditions
5. Consider breaking complex queries into smaller parts`
        throw timeoutError
      }
      throw error
    } finally {
      // Clear the timer whether the query succeeded, failed, or timed out
      if (timeoutId !== undefined) clearTimeout(timeoutId)
    }
  } finally {
    client.release()
  }
}
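// Illustrative usage (the cast mirrors how the tool handlers below detect
// timeouts):
//   try {
//     const res = await executeQueryWithTimeout('SELECT count(*) FROM big_table')
//   } catch (e) {
//     if ((e as TimeoutError).isTimeout) {
//       // surface e.suggestion to the caller
//     }
//   }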
// Format query results for better readability
function formatQueryResults(result: pg.QueryResult): QueryResultFormatted {
  const rowCount = result.rowCount ?? 0
  const response: QueryResultFormatted = {
    success: true,
    rowCount,
    summary: `Query executed successfully. Returned ${rowCount} row${
      rowCount !== 1 ? 's' : ''
    }.`,
  }
if (result.rows.length > 0) {
// Add formatted table
response.table = formatResultsAsTable(result.rows, result.fields)
// Add JSON data
response.data = result.rows
// Add warning for large result sets
if (result.rows.length > 1000) {
response.warning = `Large result set (${result.rows.length} rows). Consider using LIMIT to reduce the data returned.`
}
}
return response
}
// Main server function
async function main() {
try {
// Create MCP server FIRST
const server = new Server(
{
name: 'postgres-readonly',
version: '2.0.0',
},
{
capabilities: {
tools: {},
},
}
)
// Set up the transport immediately
const transport = new StdioServerTransport()
await server.connect(transport)
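    // All logging goes to stderr; stdout is reserved for MCP protocol frames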
console.error('MCP protocol handler ready')
// Set up handlers that don't require database yet
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: 'postgres_query',
description: `Execute any PostgreSQL query on the production database (read-only). Use this instead of psql commands. Supports SELECT, WITH, EXPLAIN, and all read operations. Add LIMIT clause to control result size. Queries timeout after ${
QUERY_TIMEOUT_MS / 1000
} seconds.`,
inputSchema: {
type: 'object',
properties: {
query: {
type: 'string',
description:
'The SQL query to execute. This replaces any psql -c command. Use LIMIT to control result size for large tables.',
},
},
required: ['query'],
},
},
{
name: 'postgres_list_tables',
              description:
                'List all tables in the database, optionally with row counts and sizes. Wraps the information_schema queries you would otherwise write by hand.',
inputSchema: {
type: 'object',
properties: {
schema: {
type: 'string',
description: 'Schema name (default: public)',
default: 'public',
},
include_counts: {
type: 'boolean',
                    description: 'Include row counts and on-disk sizes (slower but more informative)',
default: false,
},
},
},
},
{
name: 'postgres_describe_table',
description:
'Get detailed schema information for a table including columns, types, constraints, indexes, and foreign keys.',
inputSchema: {
type: 'object',
properties: {
table_name: {
type: 'string',
description: 'Name of the table to describe',
},
schema: {
type: 'string',
description: 'Schema name (default: public)',
default: 'public',
},
},
required: ['table_name'],
},
},
{
name: 'postgres_analyze_table',
description:
'Analyze a table for potential data issues, statistics, and sample data. Useful for debugging data problems. Use sample_size to control how many rows to sample.',
inputSchema: {
type: 'object',
properties: {
table_name: {
type: 'string',
description: 'Table to analyze',
},
sample_size: {
type: 'number',
description:
                      'Number of rows to sample (default: 10, clamped to 100)',
default: 10,
},
check_nulls: {
type: 'boolean',
                    description: 'Report columns containing NULL values in the sampled rows',
default: true,
},
},
required: ['table_name'],
},
},
{
name: 'postgres_find_related',
description:
'Find foreign key relationships and related data for a specific record. Results are limited to 10 rows per relationship.',
inputSchema: {
type: 'object',
properties: {
table_name: {
type: 'string',
description: 'Source table name',
},
id_column: {
type: 'string',
description: 'ID column name (default: id)',
default: 'id',
},
id_value: {
type: 'string',
description: 'ID value to search for',
},
},
required: ['table_name', 'id_value'],
},
},
{
name: 'postgres_explain_query',
description:
'Get query execution plan to debug slow queries. Shows how PostgreSQL will execute the query. Use this when queries timeout or run slowly.',
inputSchema: {
type: 'object',
properties: {
query: {
type: 'string',
description: 'Query to explain',
},
analyze: {
type: 'boolean',
description: `Run EXPLAIN ANALYZE (actually executes the query, subject to ${
QUERY_TIMEOUT_MS / 1000
} second timeout)`,
default: false,
},
},
required: ['query'],
},
},
],
}
})
// Handle tool calls
server.setRequestHandler(CallToolRequestSchema, async (request) => {
if (!pool) {
return {
content: [
{
type: 'text',
text: 'Error: Database connection is still initializing. Please try again in a moment.',
},
],
isError: true,
}
}
try {
const { name, arguments: args } = request.params
switch (name) {
case 'postgres_query': {
const { query } = args as { query: string }
// Basic sanity check
if (!isReadOnlyQuery(query)) {
throw new Error(
'Query appears to be a write operation. This server only allows read operations.'
)
}
const startTime = Date.now()
try {
const result = await executeQueryWithTimeout(query)
const duration = Date.now() - startTime
const formatted = formatQueryResults(result)
formatted.executionTime = `${duration}ms`
// Add helpful context
let response = formatted.summary + ` (${formatted.executionTime})`
if (formatted.warning) {
response += `\n\n⚠️ ${formatted.warning}`
}
if (formatted.table) {
response += '\n' + formatted.table
}
// Add JSON results for very small result sets
if (result.rows.length <= 10 && result.rows.length > 0) {
response += '\n\nJSON Results:\n'
response += JSON.stringify(result.rows, null, 2)
}
return {
content: [
{
type: 'text',
text: response,
},
],
}
} catch (error) {
const timeoutError = error as TimeoutError
if (timeoutError.isTimeout) {
return {
content: [
{
type: 'text',
text: `❌ ${timeoutError.message}\n\n${timeoutError.suggestion}`,
},
],
isError: true,
}
}
throw error
}
}
case 'postgres_list_tables': {
const { schema = 'public', include_counts = false } = args as {
schema?: string
include_counts?: boolean
}
let query = `
SELECT
t.table_schema,
t.table_name,
t.table_type
`
            if (include_counts) {
              // pg_table_size = heap + TOAST; indexes are reported separately.
              // (The previous toast_size expression referenced a nonexistent
              // "<table>_toast" relation and made the whole query fail.)
              query += `,
                pg_size_pretty(pg_total_relation_size(quote_ident(t.table_schema)||'.'||quote_ident(t.table_name))) as total_size,
                pg_size_pretty(pg_indexes_size(quote_ident(t.table_schema)||'.'||quote_ident(t.table_name))) as indexes_size,
                pg_size_pretty(pg_table_size(quote_ident(t.table_schema)||'.'||quote_ident(t.table_name))) as table_size
              `
            }
query += `
FROM information_schema.tables t
WHERE t.table_schema = $1
ORDER BY t.table_name
`
const result = await executeQueryWithTimeout(query, [schema])
const tables = result.rows as TableInfo[]
            // If counts requested, get row counts
            if (include_counts && tables.length > 0) {
              for (const table of tables) {
                try {
                  // Quote identifiers so mixed-case table names resolve correctly
                  const countResult = await executeQueryWithTimeout(
                    `SELECT COUNT(*) FROM "${schema}"."${table.table_name}"`,
                    [],
                    5000 // Shorter timeout for counts
                  )
                  table.row_count = parseInt(countResult.rows[0]?.count || '0')
                } catch (e) {
                  table.row_count = -1 // Indicate error
                }
              }
              // row_count is not among the fields returned by the query, so add
              // a synthetic field definition so it appears in the formatted table
              result.fields.push({ name: 'row_count' } as pg.FieldDef)
            }
const formatted = formatQueryResults(result)
return {
content: [
{
type: 'text',
text: formatted.summary + '\n' + (formatted.table || ''),
},
],
}
}
case 'postgres_describe_table': {
const { table_name, schema = 'public' } = args as {
table_name: string
schema?: string
}
// Get column information
const columnsQuery = `
SELECT
column_name,
data_type,
is_nullable,
column_default,
character_maximum_length,
numeric_precision,
numeric_scale
FROM information_schema.columns
WHERE table_schema = $1 AND table_name = $2
ORDER BY ordinal_position
`
const columnsResult = await executeQueryWithTimeout(columnsQuery, [
schema,
table_name,
])
// Get indexes
const indexesQuery = `
SELECT
indexname,
indexdef
FROM pg_indexes
WHERE schemaname = $1 AND tablename = $2
ORDER BY indexname
`
const indexesResult = await executeQueryWithTimeout(indexesQuery, [
schema,
table_name,
])
// Get foreign keys
const fkQuery = `
SELECT
kcu.column_name,
ccu.table_schema AS foreign_table_schema,
ccu.table_name AS foreign_table_name,
ccu.column_name AS foreign_column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.constraint_column_usage AS ccu
ON ccu.constraint_name = tc.constraint_name
WHERE tc.constraint_type = 'FOREIGN KEY'
AND tc.table_schema = $1
AND tc.table_name = $2
`
const fkResult = await executeQueryWithTimeout(fkQuery, [
schema,
table_name,
])
let response = `Table: ${schema}.${table_name}\n\n`
response += 'COLUMNS:\n'
response += formatResultsAsTable(columnsResult.rows, columnsResult.fields)
if (indexesResult.rows.length > 0) {
response += '\nINDEXES:\n'
response += formatResultsAsTable(
indexesResult.rows,
indexesResult.fields
)
}
if (fkResult.rows.length > 0) {
response += '\nFOREIGN KEYS:\n'
response += formatResultsAsTable(fkResult.rows, fkResult.fields)
}
return {
content: [
{
type: 'text',
text: response,
},
],
}
}
case 'postgres_analyze_table': {
const {
table_name,
sample_size = 10,
check_nulls = true,
} = args as {
table_name: string
sample_size?: number
check_nulls?: boolean
}
const safeLimit = Math.min(Math.max(1, sample_size), 100)
// Get basic statistics
const statsQuery = `
SELECT
COUNT(*) as total_rows,
pg_size_pretty(pg_total_relation_size($1)) as table_size
FROM ${table_name}
`
const statsResult = await executeQueryWithTimeout(statsQuery, [table_name])
// Get sample data
const sampleQuery = `SELECT * FROM ${table_name} LIMIT $1`
const sampleResult = await executeQueryWithTimeout(sampleQuery, [safeLimit])
let response = `Analysis of table: ${table_name}\n\n`
response += `Total Rows: ${statsResult.rows[0]?.total_rows || 0}\n`
response += `Table Size: ${
statsResult.rows[0]?.table_size || 'Unknown'
}\n\n`
if (sampleResult.rows.length > 0) {
response += `Sample Data (${sampleResult.rows.length} rows):\n`
response += formatResultsAsTable(sampleResult.rows, sampleResult.fields)
// Check for common issues
if (check_nulls) {
const nullCounts: Record<string, number> = {}
const columns = Object.keys(sampleResult.rows[0] || {})
columns.forEach((col) => {
nullCounts[col] = sampleResult.rows.filter(
(row) => row[col] === null
).length
})
const nullColumns = Object.entries(nullCounts)
.filter(([_, count]) => count > 0)
.map(
([col, count]) =>
`${col}: ${count}/${sampleResult.rows.length}`
)
if (nullColumns.length > 0) {
response += `\nColumns with NULL values:\n${nullColumns.join(
'\n'
)}\n`
}
}
}
return {
content: [
{
type: 'text',
text: response,
},
],
}
}
case 'postgres_find_related': {
const {
table_name,
id_column = 'id',
id_value,
} = args as {
table_name: string
id_column?: string
id_value: string
}
// First, get foreign keys from this table
const outgoingFKQuery = `
SELECT
kcu.column_name,
ccu.table_schema AS foreign_table_schema,
ccu.table_name AS foreign_table_name,
ccu.column_name AS foreign_column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.constraint_column_usage AS ccu
ON ccu.constraint_name = tc.constraint_name
WHERE tc.constraint_type = 'FOREIGN KEY'
AND tc.table_name = $1
`
const outgoingFK = await executeQueryWithTimeout(outgoingFKQuery, [
table_name,
])
// Get foreign keys pointing to this table
const incomingFKQuery = `
SELECT
tc.table_schema,
tc.table_name,
kcu.column_name,
ccu.column_name AS referenced_column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.constraint_column_usage AS ccu
ON ccu.constraint_name = tc.constraint_name
WHERE tc.constraint_type = 'FOREIGN KEY'
AND ccu.table_name = $1
`
const incomingFK = await executeQueryWithTimeout(incomingFKQuery, [
table_name,
])
let response = `Related data for ${table_name}.${id_column} = ${id_value}\n\n`
// Get the main record
const mainQuery = `SELECT * FROM ${table_name} WHERE ${id_column} = $1 LIMIT 1`
const mainResult = await executeQueryWithTimeout(mainQuery, [id_value])
if (mainResult.rows.length === 0) {
return {
content: [
{
type: 'text',
text: `No record found with ${id_column} = ${id_value}`,
},
],
}
}
response += 'MAIN RECORD:\n'
response += formatResultsAsTable(mainResult.rows, mainResult.fields)
// Get related records from outgoing foreign keys
if (outgoingFK.rows.length > 0) {
response += '\n\nOUTGOING RELATIONSHIPS:\n'
for (const fk of outgoingFK.rows) {
                  const fkValue = mainResult.rows[0]?.[fk.column_name]
                  // Only skip NULL/undefined foreign keys; 0 is a valid key value
                  if (fkValue != null) {
const relatedQuery = `SELECT * FROM ${fk.foreign_table_schema}.${fk.foreign_table_name} WHERE ${fk.foreign_column_name} = $1 LIMIT 10`
try {
const relatedResult = await executeQueryWithTimeout(
relatedQuery,
[fkValue]
)
if (relatedResult.rows.length > 0) {
response += `\n${fk.column_name} -> ${fk.foreign_table_name}.${fk.foreign_column_name}:\n`
response += formatResultsAsTable(
relatedResult.rows,
relatedResult.fields
)
}
} catch (e) {
response += `\nError fetching ${fk.foreign_table_name}: ${e}\n`
}
}
}
}
// Get related records from incoming foreign keys
if (incomingFK.rows.length > 0) {
response += '\n\nINCOMING RELATIONSHIPS:\n'
for (const fk of incomingFK.rows) {
const relatedQuery = `SELECT * FROM ${fk.table_schema}.${fk.table_name} WHERE ${fk.column_name} = $1 LIMIT 10`
try {
const relatedResult = await executeQueryWithTimeout(
relatedQuery,
[id_value]
)
if (relatedResult.rows.length > 0) {
response += `\n${fk.table_name}.${fk.column_name} references this record:\n`
response += formatResultsAsTable(
relatedResult.rows,
relatedResult.fields
)
}
} catch (e) {
response += `\nError fetching ${fk.table_name}: ${e}\n`
}
}
}
return {
content: [
{
type: 'text',
text: response,
},
],
}
}
case 'postgres_explain_query': {
const { query, analyze = false } = args as {
query: string
analyze?: boolean
}
if (!isReadOnlyQuery(query)) {
throw new Error(
'Query appears to be a write operation. This server only allows read operations.'
)
}
const explainQuery = analyze
? `EXPLAIN (ANALYZE, BUFFERS, VERBOSE) ${query}`
: `EXPLAIN (VERBOSE) ${query}`
const startTime = Date.now()
try {
const result = await executeQueryWithTimeout(explainQuery)
const duration = Date.now() - startTime
let response = analyze
? `Query Execution Plan (with actual execution - ${duration}ms):\n\n`
: 'Query Execution Plan (estimated):\n\n'
// Combine all rows into a single explanation
const planLines = result.rows.map((row) => row['QUERY PLAN'])
response += planLines.join('\n')
// Add helpful tips for common performance issues
const planText = planLines.join(' ').toLowerCase()
const tips: string[] = []
if (planText.includes('seq scan')) {
tips.push('• Sequential scan detected - consider adding an index')
}
if (planText.includes('nested loop')) {
tips.push('• Nested loop join - might be slow for large datasets')
}
if (planText.includes('sort')) {
tips.push(
'• Sorting operation - consider indexing ORDER BY columns'
)
}
if (planText.includes('hash join') && planText.includes('batches')) {
tips.push(
'• Hash join using multiple batches - might need more work_mem'
)
}
if (tips.length > 0) {
response += '\n\nPerformance Tips:\n' + tips.join('\n')
}
return {
content: [
{
type: 'text',
text: response,
},
],
}
} catch (error) {
const timeoutError = error as TimeoutError
if (timeoutError.isTimeout) {
return {
content: [
{
type: 'text',
text: `❌ ${timeoutError.message}\n\n${timeoutError.suggestion}\n\nTip: Try running EXPLAIN without ANALYZE first to see the query plan without executing it.`,
},
],
isError: true,
}
}
throw error
}
}
default:
return {
content: [
{
type: 'text',
text: `Unknown tool: ${name}`,
},
],
isError: true,
}
}
} catch (error) {
console.error('Tool execution error:', error)
return {
content: [
{
type: 'text',
text: `Error: ${
error instanceof Error ? error.message : String(error)
}`,
},
],
isError: true,
}
}
})
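    // An example tools/call message this handler serves, sent by the MCP
    // client as JSON-RPC 2.0 over stdio:
    //   {"jsonrpc":"2.0","id":1,"method":"tools/call",
    //    "params":{"name":"postgres_query","arguments":{"query":"SELECT version()"}}}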
    // Now initialize the database connection; the MCP transport is already
    // live, so tool calls return an "initializing" error until this completes
    console.error('Setting up SSH tunnel...')
    await setupSSHTunnel()
    console.error('Connecting to PostgreSQL...')
    pool = new Pool(DB_CONFIG)
    // Enforce read-only mode on every connection the pool creates. Setting it
    // on a single checked-out client would leave the rest of the pool writable.
    pool.on('connect', (client) => {
      client
        .query('SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY')
        .catch((err) => console.error('Failed to set read-only mode:', err))
    })
    // Test the connection
    try {
      const client = await pool.connect()
// Get database info
const versionResult = await client.query('SELECT version()')
const dbResult = await client.query('SELECT current_database(), current_user')
dbInfo.version = versionResult.rows[0]?.version
dbInfo.database = dbResult.rows[0]?.current_database
dbInfo.user = dbResult.rows[0]?.current_user
client.release()
console.error('Database connection established')
console.error(`Connected to: ${dbInfo.database} as ${dbInfo.user}`)
console.error(`PostgreSQL: ${dbInfo.version?.split(',')[0]}`)
} catch (error) {
console.error('Failed to connect to database:', error)
throw error
}
// Handle graceful shutdown
const shutdown = async () => {
console.error('\nShutting down...')
if (pool) {
await pool.end()
}
if (tunnelManager) {
await tunnelManager.disconnect()
}
process.exit(0)
}
process.on('SIGINT', shutdown)
process.on('SIGTERM', shutdown)
process.on('beforeExit', shutdown)
// Keep the process alive
process.stdin.resume()
} catch (error) {
console.error('Server initialization failed:', error)
process.exit(1)
}
}
// Run the server
main().catch((error) => {
console.error('Fatal error:', error)
process.exit(1)
})