Skip to main content
Glama

Perplexity MCP Server

by Rohit-Seelam
server.py5.76 kB
"""Perplexity MCP Server with three complexity levels.""" import logging import sys from typing import Dict, List, Any from mcp.server.fastmcp import FastMCP from client import PerplexityClient from config import TOOL_CONFIGS # Configure logging to stderr (critical for MCP protocol) logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", handlers=[logging.StreamHandler(sys.stderr)] ) logger = logging.getLogger(__name__) # Initialize FastMCP server mcp = FastMCP("Perplexity MCP") # Global client instance perplexity_client = None def get_perplexity_client() -> PerplexityClient: """Get or create the Perplexity client instance.""" global perplexity_client if perplexity_client is None: perplexity_client = PerplexityClient() return perplexity_client @mcp.tool() def perplexity_small(query: str, messages: List[Dict[str, str]] = None) -> Dict[str, Any]: """ Quick and reliable queries using Perplexity's sonar-pro model. Best for: Fast factual questions, basic research, immediate answers. Uses default parameters for optimal speed and cost-effectiveness. 
Args: query: The question or prompt to send to Perplexity messages: Optional conversation context (list of {"role": "user/assistant", "content": "..."}) Returns: Dictionary with content and citations """ try: client = get_perplexity_client() # Prepare messages if messages is None: messages = [] # Add the current query messages.append({"role": "user", "content": query}) # Get tool configuration config = TOOL_CONFIGS["small"] # Make API request response = client.chat_completion(messages=messages, **config) # Format and return response return client.format_response(response) except Exception as e: logger.exception("Error in perplexity_small") return { "error": "tool_error", "message": f"Failed to process query: {str(e)}" } @mcp.tool() def perplexity_medium(query: str, messages: List[Dict[str, str]] = None) -> Dict[str, Any]: """ Enhanced reasoning with moderate search depth using sonar-reasoning-pro. Best for: Complex questions requiring analysis, moderate research depth, technical explanations with citations. Uses medium reasoning effort and search context size. Args: query: The question or prompt to send to Perplexity messages: Optional conversation context (list of {"role": "user/assistant", "content": "..."}) Returns: Dictionary with content and citations """ try: client = get_perplexity_client() # Prepare messages if messages is None: messages = [] # Add the current query messages.append({"role": "user", "content": query}) # Get tool configuration config = TOOL_CONFIGS["medium"] # Make API request response = client.chat_completion(messages=messages, **config) # Format and return response return client.format_response(response) except Exception as e: logger.exception("Error in perplexity_medium") return { "error": "tool_error", "message": f"Failed to process query: {str(e)}" } @mcp.tool() def perplexity_large(query: str, messages: List[Dict[str, str]] = None) -> Dict[str, Any]: """ Comprehensive research with maximum depth using sonar-deep-research. 
Best for: Deep research tasks, comprehensive analysis, complex multi-step reasoning, academic research, detailed technical investigations. Uses high reasoning effort and search context size. WARNING: This tool may take significantly longer (potentially 10-30 minutes) and may timeout on very complex queries. Args: query: The question or prompt to send to Perplexity messages: Optional conversation context (list of {"role": "user/assistant", "content": "..."}) Returns: Dictionary with content and citations """ try: client = get_perplexity_client() # Prepare messages if messages is None: messages = [] # Add the current query messages.append({"role": "user", "content": query}) # Get tool configuration config = TOOL_CONFIGS["large"] # Log warning about potential timeout logger.warning(f"Starting deep research query - this may take 10-30 minutes") # Make API request response = client.chat_completion(messages=messages, **config) # Format and return response return client.format_response(response) except Exception as e: logger.exception("Error in perplexity_large") return { "error": "tool_error", "message": f"Failed to process query: {str(e)}" } def main(): """Main entry point for the MCP server.""" try: # Validate environment before starting get_perplexity_client() logger.info("Starting Perplexity MCP server...") logger.info("Available tools: perplexity_small, perplexity_medium, perplexity_large") # Run the MCP server with stdio transport (required for Claude Desktop) mcp.run(transport="stdio") except Exception as e: logger.error(f"Failed to start server: {str(e)}") sys.exit(1) if __name__ == "__main__": main()

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Rohit-Seelam/Perplexity_MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.