#!/usr/bin/env python3
"""
Functional tests for dbt MCP Server
End-to-end functional tests simulating real Claude Code integration scenarios.
"""
import asyncio
import json
import os
import sys
import unittest
from pathlib import Path
from unittest.mock import patch
# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from mcp_servers.dbt_server import DbtMCPServer
class TestFunctionalScenarios(unittest.IsolatedAsyncioTestCase):
    """Functional tests simulating real-world usage scenarios.

    Each test drives the DbtMCPServer end-to-end the way a Claude Code
    integration would: model discovery, dbt CLI tools, remote database
    queries, error handling, and basic performance/concurrency checks.
    """

    async def asyncSetUp(self):
        """Point the server at the dbt project and enable all tool groups."""
        # Derive every path from a single overridable root so the suite is
        # portable across machines; the default preserves the original
        # hard-coded /Users/ajdoyle/... layout for backward compatibility.
        base = Path(os.environ.get(
            "DATA_STACK_HOME", "/Users/ajdoyle/claude-data-stack-mcp"))
        os.environ.update({
            "DBT_PROJECT_DIR": str(base / "transform"),
            "DBT_PROFILES_DIR": str(base / "transform" / "profiles" / "duckdb"),
            "DBT_PROFILE": "data_stack",
            "DBT_TARGET": "dev",
            "DUCKDB_PATH": str(base / "data" / "warehouse" / "data_stack.duckdb"),
            "DBT_MCP_ENABLE_CLI_TOOLS": "true",
            "DBT_MCP_ENABLE_DISCOVERY_TOOLS": "true",
            "DBT_MCP_ENABLE_REMOTE_TOOLS": "true",
        })
        self.server = DbtMCPServer()

    async def test_model_development_workflow(self):
        """Test complete model development workflow: discover -> inspect -> compile."""
        print("\n🔄 Testing Model Development Workflow")
        # 1. Discover existing models
        models = await self.server._discover_models()
        self.assertGreater(len(models), 0)
        print(f"✅ Discovered {len(models)} models")
        # 2. Get model details for the first discovered model
        model_name = models[0]['name']
        details = await self.server._get_model_details(model_name)
        self.assertIn('content', details)
        print(f"✅ Retrieved details for {model_name}")
        # 3. Compile models (dry run; result intentionally unasserted —
        # reaching this point without an exception is the check here)
        result = await self.server._handle_dbt_tool("dbt_compile", {"models": model_name})
        print(f"✅ Compiled model: {model_name}")

    async def test_data_quality_workflow(self):
        """Test data quality testing workflow against staging models."""
        print("\n🧪 Testing Data Quality Workflow")
        # 1. Discover models for testing
        models = await self.server._discover_models()
        staging_models = [m for m in models if m['layer'] == 'staging']
        # Skip silently if the project has no staging layer
        if staging_models:
            # 2. Run tests on the first staging model
            staging_model = staging_models[0]['name']
            result = await self.server._handle_dbt_tool("dbt_test", {"models": staging_model})
            print(f"✅ Tested staging model: {staging_model}")

    async def test_database_exploration_workflow(self):
        """Test database exploration workflow via remote query tool."""
        print("\n🔍 Testing Database Exploration Workflow")
        # 1. Query database for table information
        tables_query = "SELECT table_name FROM information_schema.tables WHERE table_schema = 'main'"
        result = await self.server._handle_remote_tool("remote_query_database", {
            "sql": tables_query,
            "limit": 10
        })
        self.assertFalse(result.isError)
        print("✅ Queried database tables")
        # 2. Check specific model data (sample rows from a known staging table)
        sample_query = "SELECT * FROM main.stg_employees LIMIT 5"
        result = await self.server._handle_remote_tool("remote_query_database", {
            "sql": sample_query,
            "limit": 5
        })
        print("✅ Sampled model data")

    async def test_project_discovery_workflow(self):
        """Test project discovery and metadata workflow."""
        print("\n📊 Testing Project Discovery Workflow")
        # 1. List all models and verify the payload is a JSON list
        result = await self.server._handle_discovery_tool("discovery_list_models", {})
        self.assertFalse(result.isError)
        models_data = json.loads(result.content[0].text)
        self.assertIsInstance(models_data, list)
        print(f"✅ Listed {len(models_data)} models")
        # 2. Get project configuration
        config = await self.server._get_project_config()
        self.assertIn('project_name', config)
        print("✅ Retrieved project configuration")
        # 3. Get model lineage information
        result = await self.server._handle_discovery_tool("discovery_lineage", {})
        self.assertFalse(result.isError)
        print("✅ Retrieved model lineage")

    async def test_error_handling_scenarios(self):
        """Test that bad input surfaces as isError rather than exceptions."""
        print("\n⚠️ Testing Error Handling Scenarios")
        # 1. Invalid SQL query
        result = await self.server._handle_remote_tool("remote_query_database", {
            "sql": "SELECT * FROM nonexistent_table",
            "limit": 10
        })
        self.assertTrue(result.isError)
        print("✅ Handled invalid SQL query error")
        # 2. Invalid model name
        result = await self.server._handle_discovery_tool("discovery_model_details", {
            "model_name": "nonexistent_model"
        })
        self.assertTrue(result.isError)
        print("✅ Handled invalid model name error")
        # 3. Invalid dbt command
        result = await self.server._handle_dbt_tool("dbt_invalid_command", {})
        self.assertTrue(result.isError)
        print("✅ Handled invalid dbt command error")

    async def test_performance_scenarios(self):
        """Test performance under larger queries, repeated and concurrent calls."""
        print("\n⚡ Testing Performance Scenarios")
        # 1. Large query with limit
        result = await self.server._handle_remote_tool("remote_query_database", {
            "sql": "SELECT * FROM main.stg_employees",
            "limit": 1000
        })
        self.assertFalse(result.isError)
        print("✅ Handled large query with limit")
        # 2. Multiple model discovery calls (loop index unused)
        for _ in range(3):
            models = await self.server._discover_models()
            self.assertGreater(len(models), 0)
        print("✅ Handled multiple discovery calls")
        # 3. Concurrent operations simulation via asyncio.gather
        tasks = [
            self.server._get_project_config(),
            self.server._discover_models(),
            self.server._get_project_config()
        ]
        results = await asyncio.gather(*tasks)
        self.assertEqual(len(results), 3)
        print("✅ Handled concurrent operations")
def run_functional_tests():
    """Execute the functional suite, print a summary, and return the result.

    Returns:
        unittest.TestResult: the runner's result object (testsRun,
        failures, errors) so callers can inspect the outcome.
    """
    banner = "=" * 50
    print("🎭 Starting MCP Server Functional Tests")
    print(banner)
    # Build and run the suite with verbose output on stdout
    suite = unittest.TestLoader().loadTestsFromTestCase(TestFunctionalScenarios)
    outcome = unittest.TextTestRunner(verbosity=2, stream=sys.stdout).run(suite)
    # Summary banner
    print("\n" + banner)
    print("🎯 Functional Test Results Summary")
    print(banner)
    print(f"Tests run: {outcome.testsRun}")
    print(f"Failures: {len(outcome.failures)}")
    print(f"Errors: {len(outcome.errors)}")
    # Guard against division by zero when no tests were collected
    if outcome.testsRun > 0:
        passed = outcome.testsRun - len(outcome.failures) - len(outcome.errors)
        success_rate = passed / outcome.testsRun * 100
    else:
        success_rate = 0
    print(f"✅ Success Rate: {success_rate:.1f}%")
    return outcome
if __name__ == "__main__":
    # Propagate the suite outcome to the process exit code so CI and shell
    # callers see failures (the original always exited 0, hiding failures).
    result = run_functional_tests()
    sys.exit(0 if result.wasSuccessful() else 1)