WaPulse WhatsApp MCP Server

by Quegenx
loadChatAllMessages.ts
import { z } from 'zod';
import { Tool } from '@modelcontextprotocol/sdk/types.js';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { makeApiRequest } from '../../utils/helpers.js';

export const loadChatAllMessagesTool: Tool = {
  name: 'load_chat_messages',
  description: 'Retrieve all messages from a specific chat or conversation using WaPulse API',
  annotations: {
    title: 'Load WhatsApp Chat Messages',
    streamingHint: true,
    readOnlyHint: true,
    openWorldHint: true
  },
  inputSchema: {
    type: 'object',
    properties: {
      id: {
        type: 'string',
        description: "The ID of the chat (e.g., '353871234567@c.us' for user or 'groupid@g.us' for group)"
      },
      type: {
        type: 'string',
        description: 'The type of the chat',
        enum: ['user', 'group']
      },
      until: {
        type: 'string',
        description: 'The timestamp of the last message to load. If not provided, all messages will be loaded'
      },
      customToken: {
        type: 'string',
        description: 'Override default token for this request'
      },
      customInstanceID: {
        type: 'string',
        description: 'Override default instance ID for this request'
      }
    },
    required: ['id', 'type'],
    additionalProperties: false
  }
};

export async function handleLoadChatAllMessages(args: any, context?: any) {
  const schema = z.object({
    id: z.string(),
    type: z.enum(['user', 'group']),
    until: z.string().optional(),
    customToken: z.string().optional(),
    customInstanceID: z.string().optional()
  });

  const { id, type, until, customToken, customInstanceID } = schema.parse(args);
  const { log, streamContent, reportProgress } = context || {};

  try {
    if (log) {
      log.info("Loading chat messages", { chatId: id, type, until });
    }

    const requestBody: any = { id, type };
    if (until) {
      requestBody.until = until;
    }

    // Stream initial status
    if (streamContent) {
      const chatTypeEmoji = type === 'group' ? 'šŸ‘„' : 'šŸ‘¤';
      await streamContent({
        type: 'text',
        text: `${chatTypeEmoji} Loading messages from ${type} chat: ${id}\n\n`
      });
    }

    const response = await makeApiRequest('/api/loadChatAllMessages', requestBody, customToken, customInstanceID);

    const chatTypeEmoji = type === 'group' ? 'šŸ‘„' : 'šŸ‘¤';
    const messages = Array.isArray(response.messages) ? response.messages : [];
    const messageCount = messages.length;

    if (log) {
      log.info("Chat messages loaded", { chatId: id, messageCount });
    }

    // Stream messages progressively
    if (streamContent && messages.length > 0) {
      await streamContent({
        type: 'text',
        text: `šŸ“± Found ${messageCount} messages:\n\n`
      });

      for (let i = 0; i < Math.min(messages.length, 10); i++) {
        const msg = messages[i];

        if (reportProgress) {
          await reportProgress({ progress: i + 1, total: Math.min(messages.length, 10) });
        }

        const timestamp = msg.timestamp ? new Date(msg.timestamp * 1000).toLocaleString() : 'Unknown time';
        const from = msg.from || 'Unknown sender';
        const body = msg.body || '[No text content]';

        // Handle different message types
        if (msg.type === 'image' && msg.mediaUrl) {
          await streamContent({
            type: 'text',
            text: `šŸ“· [${timestamp}] ${from}: Image message\n`
          });
          try {
            await streamContent({
              type: 'text',
              text: ` šŸ–¼ļø Image URL: ${msg.mediaUrl}\n`
            });
          } catch (error) {
            await streamContent({
              type: 'text',
              text: ` āš ļø Could not load image: ${msg.mediaUrl}\n`
            });
          }
        } else if (msg.type === 'document' && msg.mediaUrl) {
          await streamContent({
            type: 'text',
            text: `šŸ“Ž [${timestamp}] ${from}: Document - ${msg.filename || 'Unknown file'}\n šŸ“„ ${msg.mediaUrl}\n`
          });
        } else if (msg.type === 'audio' && msg.mediaUrl) {
          await streamContent({
            type: 'text',
            text: `šŸŽµ [${timestamp}] ${from}: Audio message\n šŸ”Š ${msg.mediaUrl}\n`
          });
        } else {
          // Regular text message
          await streamContent({
            type: 'text',
            text: `šŸ’¬ [${timestamp}] ${from}: ${body.substring(0, 100)}${body.length > 100 ? '...' : ''}\n`
          });
        }
      }

      if (messages.length > 10) {
        await streamContent({
          type: 'text',
          text: `\n... and ${messages.length - 10} more messages\n`
        });
      }
    }

    return {
      content: [{
        type: 'text',
        text: `\nāœ… Chat messages loaded successfully!\n\n${chatTypeEmoji} Chat ID: ${id}\nšŸ“Š Type: ${type}\nšŸ’¬ Messages found: ${messageCount}\n${until ? `ā° Until: ${until}\n` : ''}šŸ“‹ Full Response: ${JSON.stringify(response, null, 2)}`
      }]
    };
  } catch (error: any) {
    if (log) {
      log.error("Failed to load chat messages", { error: error.message, chatId: id, type });
    }
    throw new McpError(ErrorCode.InternalError, `Failed to load messages from ${type} chat ${id}: ${error.message}`);
  }
}
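The handler above expects a context object providing log, streamContent, and reportProgress. The sketch below is a hypothetical way to call it directly with a stub context for local testing; the import path, chat ID, and stub shapes are illustrative, and makeApiRequest still needs a configured WaPulse token and instance ID (or the customToken / customInstanceID overrides) to succeed.

// Hypothetical usage sketch: invoke the handler with a stub context.
// In a real deployment the MCP server framework supplies this context.
import { handleLoadChatAllMessages } from './loadChatAllMessages.js';

const stubContext = {
  log: {
    info: (msg: string, data?: unknown) => console.log(msg, data),
    error: (msg: string, data?: unknown) => console.error(msg, data)
  },
  // Print streamed chunks to stdout as they arrive
  streamContent: async (chunk: { type: string; text: string }) => {
    process.stdout.write(chunk.text);
  },
  // Log progress updates for the first (up to) 10 messages
  reportProgress: async (p: { progress: number; total: number }) => {
    console.log(`progress ${p.progress}/${p.total}`);
  }
};

const result = await handleLoadChatAllMessages(
  { id: '353871234567@c.us', type: 'user' }, // illustrative chat ID
  stubContext
);
console.log(result.content[0].text);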

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Quegenx/wapulse-mcp'
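
The same endpoint can also be queried programmatically. Below is a minimal TypeScript sketch, assuming a Node 18+ runtime with a global fetch; the response is treated as untyped JSON because its exact schema is not documented here.

// Minimal sketch: fetch this server's entry from the Glama MCP directory API.
async function fetchServerInfo(): Promise<unknown> {
  const res = await fetch('https://glama.ai/api/mcp/v1/servers/Quegenx/wapulse-mcp');
  if (!res.ok) {
    throw new Error(`Directory API request failed: ${res.status} ${res.statusText}`);
  }
  return res.json();
}

fetchServerInfo()
  .then((info) => console.log(JSON.stringify(info, null, 2)))
  .catch((err) => console.error(err));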

If you have feedback or need assistance with the MCP directory API, please join our Discord server.