Skip to main content
Glama

LLM Researcher

by Code-Hex
integration.test.ts4.99 kB
import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import { LLMResearcher } from '@/index.js'; import { config } from '@/config.js'; describe('LLMResearcher Integration', () => { let researcher: LLMResearcher; beforeEach(() => { researcher = new LLMResearcher(); config.verbose = true; }); afterEach(async () => { await researcher.cleanup(); }); describe('End-to-end search and extraction', () => { it('should perform complete search workflow', async () => { // Test search functionality const searchResults = await researcher.search('typescript tutorial'); // Check if it's a SearchResultsPage object with results array expect(typeof searchResults).toBe('object'); expect(searchResults).toHaveProperty('results'); expect(Array.isArray(searchResults.results)).toBe(true); expect(searchResults.results.length).toBeGreaterThan(0); const firstResult = searchResults.results[0]!; expect(typeof firstResult.title).toBe('string'); expect(typeof firstResult.url).toBe('string'); expect(firstResult.url).toMatch(/^https?:\/\//); // Test content extraction with a reliable URL instead of random search result // Use example.com as it's designed to be fast and reliable for testing const testUrl = 'https://example.com'; const extractedContent = await researcher.extractFromUrl(testUrl, false); expect(typeof extractedContent).toBe('object'); expect(typeof extractedContent.title).toBe('string'); expect(typeof extractedContent.url).toBe('string'); expect(typeof extractedContent.content).toBe('string'); expect(typeof extractedContent.extractedAt).toBe('string'); expect(extractedContent.url).toBe(testUrl); expect(extractedContent.content.length).toBeGreaterThan(0); }, 45000); it('should extract content from search results with fallback', async () => { // Test search functionality const searchResults = await researcher.search('typescript tutorial'); expect(searchResults.results.length).toBeGreaterThan(0); const firstResult = searchResults.results[0]!; // Try to extract from the first result, but with error handling try { const extractedContent = await researcher.extractFromUrl(firstResult.url, false); expect(typeof extractedContent).toBe('object'); expect(typeof extractedContent.title).toBe('string'); expect(typeof extractedContent.url).toBe('string'); expect(typeof extractedContent.content).toBe('string'); expect(extractedContent.url).toBe(firstResult.url); expect(extractedContent.content.length).toBeGreaterThan(0); } catch (error) { // If the first result fails (due to timeout or other issues), // that's okay - this test documents that some sites might be slow expect(error).toBeInstanceOf(Error); expect((error as Error).message).toContain('Failed to extract content'); } }, 60000); it('should handle direct URL extraction', async () => { const testUrl = 'https://example.com'; const content = await researcher.extractFromUrl(testUrl, false); expect(content.url).toBe(testUrl); expect(content.content.length).toBeGreaterThan(0); expect(content.title.length).toBeGreaterThan(0); // Verify timestamp format expect(() => new Date(content.extractedAt)).not.toThrow(); expect(new Date(content.extractedAt).getTime()).toBeGreaterThan(Date.now() - 60000); }, 30000); }); describe('Component integration', () => { it('should have properly initialized components', () => { expect(researcher.searcher).toBeDefined(); expect(researcher.extractor).toBeDefined(); expect(researcher.cli).toBeDefined(); expect(typeof researcher.searcher.search).toBe('function'); expect(typeof researcher.extractor.extract).toBe('function'); expect(typeof researcher.cli.displayResults).toBe('function'); }); it('should handle errors gracefully', async () => { // Test invalid URL await expect( researcher.extractFromUrl('https://nonexistent-domain-12345.com', false) ).rejects.toThrow(); // Test empty search const emptyResults = await researcher.search(''); expect(Array.isArray(emptyResults.results)).toBe(true); }, 15000); }); describe('Resource cleanup', () => { it('should properly clean up resources', async () => { // Perform some operations await researcher.search('test query'); await researcher.extractFromUrl('https://example.com', false); // Cleanup should not throw await expect(researcher.cleanup()).resolves.not.toThrow(); // Browser should be closed expect((researcher.extractor as any).browser).toBeNull(); }, 30000); }); });

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Code-Hex/light-research-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server