Skip to main content
Glama

MCP Server with LLM Integration

by MelaLitho
smart_search_api.py (7.33 kB)
#!/usr/bin/env python3
"""
HTTP API wrapper for core smart search answer() method.

Exposes the project's SmartSearch service over a small FastAPI app:
  POST /api/smart-search  - run the core answer() method
  GET  /api/databases     - list configured database binds
  GET  /api/info          - static API metadata
  GET  /api/models        - probe Ollama for available LLM models
  GET  /health            - liveness check
"""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Optional, List, Dict, Any
import asyncio
import importlib
import sys
import os
import uvicorn

# Add current directory to path so local project modules (services,
# repositories, config) resolve regardless of the launch directory.
sys.path.insert(0, os.path.dirname(__file__))

app = FastAPI(
    title="Smart Search API",
    description="HTTP wrapper for core smart search functionality",
    version="1.0.0",
)


# Request/Response models
class SmartSearchRequest(BaseModel):
    """Payload for POST /api/smart-search."""
    question: str
    database: str = "db3"  # Default to your local postgres
    include_sql: bool = True
    include_semantic: bool = True
    include_schema: bool = True


class SmartSearchResponse(BaseModel):
    """Response envelope returned by POST /api/smart-search."""
    success: bool
    question: str
    database: str
    answer_markdown: str
    strategy: str
    sql_queries: List[Dict[str, Any]] = []
    semantic_results: List[Dict[str, Any]] = []
    sources_used: Dict[str, int] = {}
    processing_time: float
    error: Optional[str] = None


# Initialize smart search components (cached per database bind)
_smart_search_cache = {}


def _load_config():
    """Return the project's Config class from config.py.

    Replaces the previous ``exec(open('config.py').read(), ...)`` pattern
    with a normal import: safer (no arbitrary re-exec), cached by the
    import system, and independent of the current working directory
    (the script directory is on sys.path).
    """
    return importlib.import_module("config").Config


def get_smart_search(database: str):
    """Get or create a cached SmartSearch instance for *database*.

    Raises HTTPException(500) if any component fails to initialize.
    """
    if database not in _smart_search_cache:
        try:
            # Import required components lazily so the API process can start
            # even when these heavier dependencies are slow to load.
            from services.smart_search import SmartSearch
            from services.schema_service import SchemaService
            from services.sql_service import SQLService
            from services.semantic_service import SemanticService
            from services.synthesis_service import SynthesisService
            from repositories.postgres_repository import PostgresRepository
            from sqlalchemy import create_engine

            # Get database configuration
            connection_string = _load_config().SQLALCHEMY_BINDS[database]

            # Setup components
            engine = create_engine(connection_string)
            postgres_repo = PostgresRepository(engine)
            schema_service = SchemaService(postgres_repo)
            sql_service = SQLService(postgres_repo, schema_service, {})
            semantic_service = SemanticService(postgres_repo, {})
            synthesis_service = SynthesisService({})

            # Create smart search instance
            smart_search = SmartSearch(
                schema_service=schema_service,
                sql_service=sql_service,
                semantic_service=semantic_service,
                synthesis_service=synthesis_service,
            )
            _smart_search_cache[database] = smart_search
        except Exception as e:
            raise HTTPException(
                status_code=500,
                detail=f"Failed to initialize smart search: {str(e)}",
            )
    return _smart_search_cache[database]


@app.get("/health")
async def health_check():
    """Health check endpoint"""
    return {"status": "healthy", "message": "Smart Search API is running"}


@app.post("/api/smart-search", response_model=SmartSearchResponse)
async def smart_search_endpoint(request: SmartSearchRequest):
    """Core smart search endpoint - direct access to answer() method"""
    try:
        # Get smart search instance
        smart_search = get_smart_search(request.database)

        # Call the core answer() method
        result = smart_search.answer(
            question=request.question,
            include_sql=request.include_sql,
            include_semantic=request.include_semantic,
            include_schema=request.include_schema,
        )

        # Convert result to response format; fall back to neutral values for
        # any fields the result object left as None.
        return SmartSearchResponse(
            success=result.success,
            question=request.question,
            database=request.database,
            answer_markdown=result.answer_markdown or "",
            strategy=str(result.strategy.value) if result.strategy else "unknown",
            sql_queries=result.sql_queries or [],
            semantic_results=result.semantic_results or [],
            sources_used=result.sources_used or {},
            processing_time=result.processing_time or 0.0,
            error=result.error,
        )
    except HTTPException:
        # Propagate init failures from get_smart_search as-is instead of
        # double-wrapping them in a second "Smart search failed" 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Smart search failed: {str(e)}")


@app.get("/api/databases")
async def list_databases():
    """List available databases"""
    try:
        return {
            "databases": list(_load_config().SQLALCHEMY_BINDS.keys()),
            "default": "db3",
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to list databases: {str(e)}")


@app.get("/api/info")
async def get_api_info():
    """Get API information and available models"""
    return {
        "name": "Smart Search API",
        "version": "1.0.0",
        "description": "HTTP wrapper for core smart search functionality",
        "endpoints": {
            "smart_search": "/api/smart-search",
            "databases": "/api/databases",
            "health": "/health",
            "info": "/api/info",
        },
        "databases": {
            "db1": "PostgreSQL Remote 1",
            "db2": "PostgreSQL Remote 2",
            "db3": "PostgreSQL Local (default)",
        },
        "features": [
            "Question Classification",
            "Multi-Strategy Search (SQL, Semantic, Hybrid, Schema)",
            "Markdown Response Generation",
            "Performance Metrics",
            "Error Handling",
        ],
    }


@app.get("/api/models")
async def get_models():
    """Get available LLM models"""
    try:
        import requests

        # Try to get models from Ollama
        ollama_models = []
        try:
            response = requests.get("http://localhost:11434/api/tags", timeout=2)
            if response.status_code == 200:
                data = response.json()
                ollama_models = [model["name"] for model in data.get("models", [])]
        except Exception:
            # Best-effort probe: Ollama being unreachable is expected; fall
            # through to the defaults below. (Was a bare ``except:`` which
            # would also swallow KeyboardInterrupt/SystemExit.)
            pass

        # Default known models if Ollama is not available
        default_models = [
            "mistral:latest",
            "gpt-oss:20b",
            "bge-large:latest",
        ]

        # Use Ollama models if available, otherwise use defaults
        available_models = ollama_models if ollama_models else default_models

        return {
            "models": available_models,
            "default": "mistral:latest",
            "source": "ollama" if ollama_models else "default",
        }
    except Exception as e:
        # Fallback to known working models
        return {
            "models": [
                "mistral:latest",
                "gpt-oss:20b",
                "bge-large:latest",
            ],
            "default": "mistral:latest",
            "source": "fallback",
            "error": str(e),
        }


if __name__ == "__main__":
    print("Starting Smart Search API...")
    print("API available at: http://localhost:8002")
    print("Health check: http://localhost:8002/health")
    print("Interactive docs: http://localhost:8002/docs")
    print("Core endpoint: POST http://localhost:8002/api/smart-search")
    uvicorn.run(app, host="0.0.0.0", port=8002)

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/MelaLitho/MCPServer'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.