perplexity_ask_pro
Generate detailed, structured responses for search, research, and reasoning tasks using 2025 Perplexity models, with options for citations, advanced parameters, and domain-specific filtering.
Instructions
Ultra-Pro Perplexity API with CORRECT 2025 models, full structured responses, caching, and advanced features. Supports search, research, reasoning, and offline models with proper parameters.
Input Schema
Table | JSON Schema
| Name | Required | Description | Default |
|---|---|---|---|
| format | No | Response format type | full |
| messages | Yes | Array of conversation messages | |
| model | No | Perplexity model to use (2025 correct models only) | sonar-pro |
| options | No | Advanced options for the API call | |
Implementation Reference
- index.js:727-741 (handler)Handler for the 'perplexity_ask_pro' tool call. Extracts arguments, validates API key, invokes the Perplexity API via class methods, parses/formats the response, and returns MCP-formatted content.
case "perplexity_ask_pro":
  // Tool arguments, with the same defaults the input schema documents.
  const { messages, model = 'sonar-pro', format = 'full', options = {} } = args;
  // Fail fast when no API key was configured in the environment.
  if (!nascoderMCP.apiKey) {
    throw new Error('PERPLEXITY_API_KEY environment variable is required');
  }
  // Call Perplexity, then shape the raw response per the requested format.
  const response = await nascoderMCP.callPerplexityAPI(messages, model, options);
  const parsed = nascoderMCP.parseResponse(response, format);
  const formatted = nascoderMCP.formatResponse(parsed, format);
  // MCP responses wrap the payload in a `content` array.
  return { content: [formatted] };
- index.js:601-672 (schema)Input schema definition for the 'perplexity_ask_pro' tool, specifying parameters like messages, model selection from 2025 Perplexity models, response format, and advanced search options.
// JSON Schema describing the tool's arguments. Only `messages` is required;
// every other field carries the default that the handler also applies.
inputSchema: {
  type: "object",
  properties: {
    // Conversation history as OpenAI-style {role, content} pairs.
    messages: {
      type: "array",
      items: {
        type: "object",
        properties: {
          role: { type: "string", description: "Message role (system, user, assistant)" },
          content: { type: "string", description: "Message content" }
        },
        required: ["role", "content"]
      },
      description: "Array of conversation messages"
    },
    // Restricted to the 2025 model catalog (also validated in callPerplexityAPI).
    model: {
      type: "string",
      enum: [ "sonar-pro", "sonar", "sonar-deep-research", "sonar-reasoning-pro", "sonar-reasoning", "r1-1776" ],
      default: "sonar-pro",
      description: "Perplexity model to use (2025 correct models only)"
    },
    // Controls how much structure the formatted response carries.
    format: {
      type: "string",
      enum: ["simple", "with-citations", "structured", "full"],
      default: "full",
      description: "Response format type"
    },
    // Optional tuning knobs forwarded into the API payload.
    options: {
      type: "object",
      properties: {
        maxTokens: { type: "number", default: 2000, description: "Maximum tokens (1-8000)" },
        temperature: { type: "number", default: 0.2, description: "Randomness (0-2)" },
        topP: { type: "number", default: 0.9, description: "Nucleus sampling (0-1)" },
        topK: { type: "number", default: 0, description: "Top-k filtering (0 = disabled)" },
        searchMode: { type: "string", enum: ["web", "academic"], default: "web", description: "Search mode - 'academic' prioritizes scholarly sources" },
        reasoningEffort: { type: "string", enum: ["low", "medium", "high"], default: "medium", description: "Reasoning effort for reasoning models" },
        returnImages: { type: "boolean", default: false },
        returnRelatedQuestions: { type: "boolean", default: false },
        searchRecency: { type: "string", description: "Filter by time (e.g., 'week', 'day')" },
        searchDomains: { type: "array", items: { type: "string" }, description: "Filter search to specific domains (max 10)" },
        searchAfterDate: { type: "string", description: "Search after date (MM/DD/YYYY)" },
        searchBeforeDate: {
          type: "string", description: "Search before date (MM/DD/YYYY)" }
      },
      description: "Advanced options for the API call"
    }
  },
  required: ["messages"]
}
- index.js:717-719 (registration)Registration of tool list handler, which returns the TOOLS array including 'perplexity_ask_pro'.
// Advertise the available tools to MCP clients; TOOLS includes 'perplexity_ask_pro'.
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return { tools: TOOLS };
});
- index.js:229-396 (helper)Core helper method that implements the Perplexity API call logic, including caching, rate limiting, validation, correct 2025 API payload construction, retries, analytics tracking, and error handling.async callPerplexityAPI(messages, model = 'sonar-pro', options = {}) { const startTime = Date.now(); try { // Validate inputs if (!Array.isArray(messages) || messages.length === 0) { throw new Error('Messages array is required and cannot be empty'); } if (!this.apiKey) { throw new Error('PERPLEXITY_API_KEY environment variable is required'); } // Validate model exists if (!this.models[model]) { throw new Error(`Invalid model: ${model}. Available models: ${Object.keys(this.models).join(', ')}`); } // Check rate limit with fallback if (this.rateLimiter) { try { await this.rateLimiter.consume('perplexity-api'); } catch (rateLimitError) { throw new Error('Rate limit exceeded. Please wait before making more requests.'); } } // Check cache first with fallback let cached = null; if (this.cache) { try { const cacheKey = this.generateCacheKey(messages, model, options); cached = this.cache.get(cacheKey); } catch (cacheError) { this.logger.warn('Cache lookup failed:', cacheError.message); } } if (cached) { this.analytics.cacheHits++; this.logger.info('Cache hit for request'); return { ...cached, fromCache: true }; } this.analytics.cacheMisses++; // ✅ CORRECT 2025 PERPLEXITY API REQUEST PAYLOAD const payload = { model: model, messages: messages.map(msg => ({ role: msg.role || 'user', content: String(msg.content || '') })), max_tokens: Math.min(Math.max(options.maxTokens || 2000, 1), 8000), temperature: Math.min(Math.max(options.temperature || 0.2, 0), 2), top_p: Math.min(Math.max(options.topP || 0.9, 0), 1), top_k: Math.max(options.topK || 0, 0), stream: false, presence_penalty: Math.min(Math.max(options.presencePenalty || 0, -2), 2), frequency_penalty: Math.min(Math.max(options.frequencyPenalty || 0, -2), 2), // ✅ CORRECT 2025 SEARCH PARAMETERS 
search_mode: options.searchMode || 'web', // 'web' or 'academic' reasoning_effort: options.reasoningEffort || 'medium', // 'low', 'medium', 'high' (for reasoning models) // ✅ CORRECT FILTER PARAMETERS search_domain_filter: Array.isArray(options.searchDomains) ? options.searchDomains : [], return_images: options.returnImages || false, return_related_questions: options.returnRelatedQuestions || false, search_recency_filter: options.searchRecency || undefined, search_after_date_filter: options.searchAfterDate || undefined, search_before_date_filter: options.searchBeforeDate || undefined, last_updated_after_filter: options.lastUpdatedAfter || undefined, last_updated_before_filter: options.lastUpdatedBefore || undefined, // ✅ CORRECT WEB SEARCH OPTIONS web_search_options: options.webSearchOptions || undefined, // ✅ CORRECT RESPONSE FORMAT response_format: options.responseFormat || undefined }; // Remove undefined values to clean up payload Object.keys(payload).forEach(key => { if (payload[key] === undefined) { delete payload[key]; } }); // Make API call with retry logic let lastError; const maxRetries = 3; for (let attempt = 1; attempt <= maxRetries; attempt++) { try { const response = await fetch(`${this.baseUrl}/chat/completions`, { method: 'POST', headers: { 'Authorization': `Bearer ${this.apiKey}`, 'Content-Type': 'application/json', 'User-Agent': 'NasCoder-Perplexity-MCP/2.0' }, body: JSON.stringify(payload), timeout: 60000 // 60 second timeout for research models }); if (!response.ok) { const errorText = await response.text().catch(() => 'Unknown error'); throw new Error(`API Error ${response.status}: ${errorText}`); } const data = await response.json(); // Validate response structure if (!data || typeof data !== 'object') { throw new Error('Invalid response format from API'); } // Cache the response with error handling if (this.cache) { try { const cacheKey = this.generateCacheKey(messages, model, options); this.cache.set(cacheKey, data); } catch (cacheError) { 
this.logger.warn('Failed to cache response:', cacheError.message); } } // Update analytics const responseTime = Date.now() - startTime; this.analytics.totalRequests++; this.analytics.avgResponseTime = (this.analytics.avgResponseTime * (this.analytics.totalRequests - 1) + responseTime) / this.analytics.totalRequests; if (data.usage) { this.analytics.tokenUsage.total += data.usage.total_tokens || 0; this.analytics.tokenUsage.prompt += data.usage.prompt_tokens || 0; this.analytics.tokenUsage.completion += data.usage.completion_tokens || 0; } this.analytics.modelUsage[model] = (this.analytics.modelUsage[model] || 0) + 1; this.logger.info(`API call successful - Model: ${model}, Tokens: ${data.usage?.total_tokens || 0}, Time: ${responseTime}ms`); return { ...data, fromCache: false, responseTime }; } catch (error) { lastError = error; if (attempt < maxRetries) { const delay = Math.pow(2, attempt) * 1000; // Exponential backoff this.logger.warn(`API call attempt ${attempt} failed, retrying in ${delay}ms:`, error.message); await new Promise(resolve => setTimeout(resolve, delay)); } } } throw lastError; } catch (error) { this.analytics.errors++; this.logger.error('API call failed after all retries:', error.message); throw new Error(`Perplexity API call failed: ${error.message}`); } finally { this.saveAnalytics(); } }
- index.js:727-741 (helper)NOTE(review): this excerpt repeats the tool handler above rather than the definitions of parseResponse/formatResponse; it shows where those supporting helpers are invoked to process and structure the API response.case "perplexity_ask_pro": const { messages, model = 'sonar-pro', format = 'full', options = {} } = args; if (!nascoderMCP.apiKey) { throw new Error('PERPLEXITY_API_KEY environment variable is required'); } const response = await nascoderMCP.callPerplexityAPI(messages, model, options); const parsed = nascoderMCP.parseResponse(response, format); const formatted = nascoderMCP.formatResponse(parsed, format); return { content: [formatted] };