llm.test.ts (1.65 kB)
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { optimizeContent } from '../src/llm.js';

// Mock @huggingface/transformers
const { mockGenerator } = vi.hoisted(() => {
  return { mockGenerator: vi.fn() };
});

vi.mock('@huggingface/transformers', () => ({
  pipeline: vi.fn().mockResolvedValue(mockGenerator),
}));

describe('optimizeContent', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });

  it('should return original content if no prompt is provided', async () => {
    const content = 'Some content';
    const result = await optimizeContent(content, '');
    expect(result).toBe(content);
    expect(mockGenerator).not.toHaveBeenCalled();
  });

  it('should call generator if prompt is provided', async () => {
    const content = 'Some long content to summarize. '.repeat(5); // Make it > 50 chars
    const prompt = 'Summarize this';
    const mockOutput = [{ generated_text: 'Summary' }];
    mockGenerator.mockResolvedValue(mockOutput);

    const result = await optimizeContent(content, prompt);

    expect(result).toBe('Summary');
    expect(mockGenerator).toHaveBeenCalledWith(
      expect.stringContaining('Instruction: Summarize this'),
      expect.any(Object)
    );
  });

  it('should return original content if content is too short', async () => {
    const content = 'Short';
    const result = await optimizeContent(content, 'Prompt');
    expect(result).toBe(content);
    // Should not call generator for short content
    expect(mockGenerator).not.toHaveBeenCalled();
  });
});
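
For reference, here is a minimal sketch of an optimizeContent implementation consistent with the behavior these tests assert. The roughly 50-character threshold is implied by the "too short" test; the model name, generation options, and prompt template beyond the 'Instruction:' prefix are assumptions, not taken from the actual src/llm.ts.

// Hypothetical sketch of src/llm.ts, inferred from the tests above.
import { pipeline } from '@huggingface/transformers';

// Assumed threshold: the tests suggest content must exceed ~50 characters.
const MIN_CONTENT_LENGTH = 50;

export async function optimizeContent(content: string, prompt: string): Promise<string> {
  // Skip the model entirely when there is no prompt or the content is short,
  // matching the two tests where the generator is never called.
  if (!prompt || content.length <= MIN_CONTENT_LENGTH) {
    return content;
  }

  // Task and model name are placeholders; the real module may use others.
  const generator = await pipeline('text-generation', 'Xenova/distilgpt2');
  const output: any = await generator(
    `Instruction: ${prompt}\n\nContent: ${content}`,
    { max_new_tokens: 256 }
  );

  // transformers.js text-generation returns an array of { generated_text }.
  return output[0].generated_text;
}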


MCP directory API

We provide all the information about MCP servers via our MCP directory API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/v3nom/toon-fetch'
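
The same request from TypeScript, as a minimal sketch; the shape of the returned JSON is not documented here, so the response is left untyped:

// Minimal sketch: querying the MCP directory API with fetch.
// The response schema is an assumption; inspect the JSON to see its fields.
const res = await fetch('https://glama.ai/api/mcp/v1/servers/v3nom/toon-fetch');
if (!res.ok) {
  throw new Error(`MCP directory API request failed: ${res.status}`);
}
const server = await res.json();
console.log(server);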

If you have feedback or need assistance with the MCP directory API, please join our Discord server.