
Multi-Cloud Infrastructure MCP Server

by nomadslayer
init_clickhouse.sql
-- ClickHouse Schema Initialization for MCP
-- Run this script to set up all required tables

CREATE DATABASE IF NOT EXISTS mcp;

USE mcp;

-- Resources table: stores registered infrastructure
CREATE TABLE IF NOT EXISTS resources (
    resource_id String,
    provider String,
    gpu_name String,
    gpu_ram Float32,
    cpu_cores UInt16,
    region String,
    status String,
    price_hour Float32,
    metadata String, -- JSON blob
    registered_at DateTime DEFAULT now(),
    updated_at DateTime DEFAULT now()
) ENGINE = ReplacingMergeTree(updated_at)
ORDER BY (provider, resource_id)
PRIMARY KEY (provider, resource_id);

-- GPU Rules table: selection matrix for optimal GPU choice
CREATE TABLE IF NOT EXISTS gpu_rules (
    id UInt32 DEFAULT rand(),
    gpu_family String,
    type String, -- Consumer, Server, Enterprise
    min_use_case String,
    optimal_use_case Nullable(String),
    excluded Nullable(String),
    power_rating String,
    typical_cloud_instance String,
    priority UInt8 DEFAULT 99,
    created_at DateTime DEFAULT now()
) ENGINE = ReplacingMergeTree(created_at)
ORDER BY (gpu_family)
PRIMARY KEY (gpu_family);

-- Insert default GPU rules from spec
INSERT INTO gpu_rules (gpu_family, type, min_use_case, optimal_use_case, excluded, power_rating, typical_cloud_instance, priority) VALUES
    ('RTX 3060', 'Consumer', 'basic inference', NULL, NULL, '170W', 'n/a', 3),
    ('RTX 4090', 'Consumer', 'optimal single GPU', 'training & inference', NULL, '450W', 'Vast.AI', 2),
    ('RTX 6000 Ada', 'Server', 'optimal production', 'multi-tenant workloads', NULL, '300W', 'Runpod', 1);

-- Deployments table: track all deployment operations
CREATE TABLE IF NOT EXISTS deployments (
    deployment_id String,
    provider String,
    spec String, -- JSON blob
    status String, -- pending, running, failed, terminated
    created_at DateTime DEFAULT now(),
    updated_at DateTime DEFAULT now()
) ENGINE = ReplacingMergeTree(updated_at)
ORDER BY (provider, deployment_id)
PRIMARY KEY (provider, deployment_id);

-- Events table: high-volume telemetry and audit log
CREATE TABLE IF NOT EXISTS events (
    event_id UInt64 DEFAULT rand64(),
    event_type String,
    payload String, -- JSON blob
    source String,
    timestamp DateTime DEFAULT now()
) ENGINE = MergeTree()
ORDER BY (event_type, timestamp)
PARTITION BY toYYYYMM(timestamp)
TTL timestamp + INTERVAL 90 DAY; -- Auto-cleanup after 90 days

-- Heartbeats table: resource health monitoring
CREATE TABLE IF NOT EXISTS heartbeats (
    resource_id String,
    payload String, -- JSON blob with metrics
    timestamp DateTime DEFAULT now()
) ENGINE = MergeTree()
ORDER BY (resource_id, timestamp)
PARTITION BY toYYYYMMDD(timestamp)
TTL timestamp + INTERVAL 7 DAY; -- Keep only recent heartbeats

-- No separate indexes needed for the common queries:
-- ClickHouse uses the ORDER BY key as its primary index.

-- Materialized views for analytics (optional)

-- Active resources by provider
CREATE MATERIALIZED VIEW IF NOT EXISTS mv_active_resources
ENGINE = SummingMergeTree()
ORDER BY (provider, status)
AS SELECT
    provider,
    status,
    count() AS resource_count,
    -- SummingMergeTree sums numeric columns on merge, so storing avg() here
    -- would produce garbage; store the sum and compute averages at query time
    -- as total_price_hour / resource_count.
    sum(price_hour) AS total_price_hour
FROM resources
WHERE status IN ('running', 'active', 'online')
GROUP BY provider, status;

-- Deployment success rate by provider
CREATE MATERIALIZED VIEW IF NOT EXISTS mv_deployment_stats
ENGINE = SummingMergeTree()
-- status must be part of the key, otherwise rows for different statuses
-- would be summed together during merges
ORDER BY (provider, date, status)
AS SELECT
    provider,
    toDate(created_at) AS date,
    status,
    count() AS deployment_count
FROM deployments
GROUP BY provider, date, status;

-- Event volume by type
CREATE MATERIALIZED VIEW IF NOT EXISTS mv_event_volume
ENGINE = SummingMergeTree()
ORDER BY (event_type, hour) -- key must reference the view's own columns, not the source table's
AS SELECT
    event_type,
    toStartOfHour(timestamp) AS hour,
    count() AS event_count
FROM events
GROUP BY event_type, hour;

-- Grants (adjust based on your user setup)
-- GRANT SELECT, INSERT, ALTER, DELETE ON mcp.* TO mcp_user;

SHOW TABLES FROM mcp;
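
Once the script has been applied (for example by piping the file into clickhouse-client --multiquery), the schema can be smoke-tested with queries like the following sketch. The resource values and the 'running' success criterion are hypothetical, not part of the server's spec; the FINAL modifier is used because ReplacingMergeTree deduplicates asynchronously during background merges, so plain reads may still see stale duplicate rows.

-- Hypothetical smoke test: register a resource, then read back the
-- deduplicated state. FINAL collapses duplicate (provider, resource_id)
-- rows to the one with the newest updated_at.
INSERT INTO mcp.resources (resource_id, provider, gpu_name, gpu_ram, cpu_cores, region, status, price_hour, metadata)
VALUES ('res-001', 'runpod', 'RTX 6000 Ada', 48, 16, 'us-east', 'running', 0.79, '{}');

SELECT resource_id, provider, gpu_name, status, price_hour
FROM mcp.resources FINAL
WHERE provider = 'runpod';

-- Preferred GPU from the rules matrix (lower priority = preferred)
SELECT gpu_family, typical_cloud_instance, priority
FROM mcp.gpu_rules FINAL
ORDER BY priority ASC
LIMIT 1;

-- Daily deployment success rate from the materialized view; sum() here
-- re-aggregates any rows SummingMergeTree has not merged yet, and
-- 'running' is assumed to be the success status.
SELECT
    provider,
    date,
    sumIf(deployment_count, status = 'running') / sum(deployment_count) AS success_rate
FROM mcp.mv_deployment_stats
GROUP BY provider, date;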

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/nomadslayer/infra-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.