// Integration tests for the US2 answer flow: LLM-backed summaries with
// graceful degradation to citation-only answers.
// Note that static ESM imports are hoisted, so imported modules must read
// process.env lazily; this ordering works because loadConfig is invoked
// with process.env at runtime, after these assignments have executed.
import path from 'path';

process.env.RUNBOOK_ROOT = path.resolve('tests/fixtures');
process.env.LLM_API_KEY = 'mock-key-for-testing';
import { loadConfig } from '../../src/adapters/config.mjs';
import { createFsAdapter } from '../../src/adapters/fsio.mjs';
import { createIndexer } from '../../src/services/indexer.mjs';
import { describe, test, expect, runTests } from '../unit/core/_harness.mjs';
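
// describe/test/expect come from the repo's own minimal harness; the
// runTests() call at the bottom of the file executes everything registered.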
// Mock LLM adapter that returns controlled responses.
function mockLLMAdapter() {
  return {
    generateSummary: async (chunks, question) => {
      // Mock LLM response: built only from the provided chunks, so there is
      // no hallucinated content to worry about.
      const titles = chunks.map(c => c.title || 'Unknown').join(', ');
      return {
        summary: `Based on the retrieved runbooks (${titles}), here are the key points for "${question}". This response is generated from the provided documentation only.`,
        confidence: 0.8,
        model: 'mock-llm-v1'
      };
    }
  };
}
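
// NOTE: a real adapter would presumably wrap an HTTP client and read
// LLM_API_KEY; the mock keeps these tests hermetic and deterministic
// (no network, fixed wording to assert against).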

// Mock answerService with the same contract as the real service; the search
// and LLM collaborators are injected.
function mockAnswerService({ searchService, llmAdapter }) {
  return {
    answer: async (question, options = {}) => {
      const searchResult = searchService.search(question, { topK: options.topK || 3 });
      const citations = searchResult.results.map((r, idx) => ({
        id: idx + 1,
        doc_id: r.docId,
        title: r.title,
        // Append an ellipsis only when the snippet was actually truncated.
        snippet: r.text.length > 150 ? `${r.text.slice(0, 150)}...` : r.text,
        stale: r.stale
      }));
      let summary = null;
      let mode = 'offline';
      if (llmAdapter && options.useLLM !== false) {
        try {
          const llmResult = await llmAdapter.generateSummary(searchResult.results, question);
          summary = llmResult.summary;
          mode = 'online';
        } catch {
          // Graceful degradation: the LLM is unavailable, so serve citations
          // without a summary and flag the response accordingly.
          mode = 'degraded';
        }
      }
      return {
        question,
        summary,
        citations,
        risks: searchResult.results.flatMap(r => r.risks || []),
        safe_ops: searchResult.results.flatMap(r => r.safeOps || []),
        mode
      };
    }
  };
}

// Shared fixtures: config is resolved from the env vars set at the top of
// the file, and the indexer walks the runbook fixtures with a silent logger.
const config = loadConfig(process.env);
const fsAdapter = createFsAdapter(config.root);
const indexer = createIndexer({ fsAdapter, config, logger: { log: () => {} } });

// Stub search service over a built index: returns the top-K documents with
// synthetic text. Shared by every test below.
function makeSearchService(index) {
  return {
    search: (q, opts) => ({
      results: index.documents.slice(0, opts?.topK || 3).map(d => ({
        docId: d.id,
        title: d.title,
        text: `Content for ${d.title}`,
        risks: d.risks || [],
        safeOps: d.safeOps || [],
        stale: d.freshness?.stale || false
      }))
    })
  };
}

describe('us2:answer-llm-on', () => {
  test('returns answer with LLM summary when API key available', async () => {
    const index = await indexer.buildIndex();
    const searchService = makeSearchService(index);
    const llmAdapter = mockLLMAdapter();
    const answerService = mockAnswerService({ searchService, llmAdapter });
    const result = await answerService.answer('How to restart service?', { useLLM: true });
    expect(result.summary !== null).toBe(true);
    expect(result.summary.includes('Based on the retrieved runbooks')).toBe(true);
    expect(Array.isArray(result.citations)).toBe(true);
    expect(result.mode).toBe('online');
  });

  test('falls back to degraded mode when the LLM call fails', async () => {
    const index = await indexer.buildIndex();
    const searchService = makeSearchService(index);
    // Adapter that always throws, simulating an outage or quota error.
    const failingLLMAdapter = {
      generateSummary: async () => { throw new Error('LLM service unavailable'); }
    };
    const answerService = mockAnswerService({ searchService, llmAdapter: failingLLMAdapter });
    const result = await answerService.answer('How to restart service?', { useLLM: true });
    expect(result.summary).toBe(null);
    expect(result.mode).toBe('degraded');
    expect(Array.isArray(result.citations)).toBe(true);
  });
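
  // Sketch of the remaining mode: with useLLM: false the service skips the
  // LLM entirely and stays 'offline' (citations only, no summary). This
  // exercises only code paths already defined above.
  test('stays offline when useLLM is false', async () => {
    const index = await indexer.buildIndex();
    const searchService = makeSearchService(index);
    const answerService = mockAnswerService({ searchService, llmAdapter: mockLLMAdapter() });
    const result = await answerService.answer('How to restart service?', { useLLM: false });
    expect(result.summary).toBe(null);
    expect(result.mode).toBe('offline');
    expect(Array.isArray(result.citations)).toBe(true);
  });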
});
await runTests();