# We provide all the information about MCP servers via our MCP API:
#   curl -X GET 'https://glama.ai/api/mcp/v1/servers/isc-tdyar/medical-graphrag-assistant'
# If you have feedback or need assistance with the MCP directory API, please join our Discord server.
# AWS EC2 Configuration for FHIR GraphRAG
# This configuration connects to IRIS running on AWS EC2 g5.xlarge

# Database Connection Settings (AWS EC2)
database:
  iris:
    host: "localhost"         # Local on EC2 (Docker container)
    port: 32782               # IRIS SQL port (Docker mapped)
    namespace: "%SYS"         # Use %SYS namespace for reliable access on AWS
    username: "_SYSTEM"       # Database username
    # NOTE(review): plaintext credential committed to config — move to an
    # env var / secret store; kept as-is here to preserve behavior.
    password: "SYS"           # SuperUser password
    connection_timeout: 30    # Seconds before a connection attempt fails
    pool_size: 5              # Base connection-pool size
    max_overflow: 10          # Extra connections allowed beyond pool_size
# BYOT Storage Configuration (use existing vector table on AWS)
# Note: Using SQLUser.ClinicalNoteVectors which already has vectorized data
storage:
  iris:
    table_name: "SQLUser.ClinicalNoteVectors"
    column_mapping:
      id_column: "ID"
      text_column: "TextContent"
      metadata_columns:
        - "ResourceID"
        - "PatientID"
        - "DocumentType"
    zero_copy: true
    preserve_schema: true
    validate_table_name: false  # Disable validation since this is a custom table

  # CloudConfiguration API vector settings (iris-vector-rag v0.5.4+)
  # These settings are read by SchemaManager via CloudConfiguration
  # Using 1024-dim for NVIDIA NIM embeddings (NV-EmbedQA-E5-v5)
  # Placed at storage level: other sections reference "storage.vector_dimension".
  vector_dimension: 1024      # Vector dimensionality for NVIDIA NIM embeddings
  distance_metric: "COSINE"   # Distance metric for similarity search
  index_type: "HNSW"          # Vector index type
# Vector Storage Configuration (AWS tables)
vector_storage:
  table_name: "SQLUser.ClinicalNoteVectors"
  reference_column: "ResourceID"
  vector_column: "Embedding"    # VECTOR(DOUBLE, 1024)
  model_column: "EmbeddingModel"
  dimension: 1024               # Legacy setting (CloudConfiguration uses storage.vector_dimension)
# Knowledge Graph Storage (AWS tables)
knowledge_graph:
  entities_table: "RAG.Entities"
  relationships_table: "RAG.EntityRelationships"
# Multimodal Embedding Configuration (NV-CLIP)
nvclip:
  base_url: "http://13.216.2.41:8002/v1"  # Remote NV-CLIP NIM endpoint
  model: "nvidia/nvclip"
  batch_size: 16
  timeout: 60                             # Seconds per embedding request
# Embedding Configuration (NVIDIA NIM)
embeddings:
  model: "nvidia/nv-embedqa-e5-v5"  # NVIDIA NIM embedding model
  base_url: "http://localhost:8001/v1"
  dimension: 1024                   # Legacy setting (CloudConfiguration uses storage.vector_dimension)
  batch_size: 32
  normalize: true
  device: "cuda"                    # Use GPU on EC2 g5.xlarge
# LLM Configuration (NVIDIA NIM LLM)
llm:
  provider: "nvidia_nim"
  model: "meta/llama-3.1-8b-instruct"
  base_url: "http://localhost:8001/v1"  # NIM LLM service (local on EC2)
  temperature: 0.0                      # Deterministic output for extraction
  max_tokens: 1000
  timeout: 60                           # Seconds per completion request
# Logging Configuration
logging:
  level: "INFO"
  format: "json"
  file: "logs/fhir_graphrag_aws.log"
  rotation: "daily"
  max_bytes: 10485760   # 10 MiB per log file
  backup_count: 7       # Keep one week of rotated logs
# Monitoring
monitoring:
  enabled: true
  metrics:
    - "entity_extraction_time"
    - "entity_extraction_count"
    - "relationship_extraction_count"
    - "query_latency"
    - "graph_traversal_depth"
    - "gpu_utilization"   # AWS-specific: monitor GPU usage
    - "network_latency"   # AWS-specific: monitor network latency
  performance_targets:
    entity_extraction_time_ms: 1000       # Faster with GPU
    query_latency_ms: 500                 # Target sub-second queries
    knowledge_graph_build_time_ms: 60000  # < 1 minute for 51 documents
# Feature Flags
features:
  entity_normalization: false
  temporal_analysis: false
  entity_feedback: false
  query_history: true       # Enable for production monitoring
  gpu_acceleration: true    # AWS-specific: use GPU for embeddings
# AWS-Specific Settings
aws:
  region: "us-east-1"
  instance_id: "i-0432eba10b98c4949"
  instance_type: "g5.xlarge"
  gpu_type: "NVIDIA A10G"
  enable_cloudwatch: false  # CloudWatch metrics integration (future)