# jp_lit_search_cache_index
Cross-search saved cache entries from previous Japanese literature searches and return cache keys for re-extracting those results.
## Instructions
Cross-search saved jp_lit_search caches and return a list of cache_key values that can be used to re-extract those results.
## Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| query | Yes | Text to match against the cached query and the cached items' title, author, subject, and source_id fields. | |
| session_id | No | Restrict results to caches recorded in this session (format `YYYY-MM-DD-HHMMSS`). | |
| source | No | Only return caches whose search source matches this value. | |
| issued_from | No | Only return caches containing at least one item issued on or after this date. | |
| issued_to | No | Only return caches containing at least one item issued on or before this date. | |
| saved_on | No | Shortcut filter on the save date: `YYYY-MM-DD`, `today`, `yesterday`, or `last_7_days`. | |
| saved_from | No | Lower bound on the cache's `saved_at` timestamp. | |
| saved_to | No | Upper bound on the cache's `saved_at` timestamp. | |
| limit | No | Maximum number of entries to return (max 200). | 50 |
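For illustration, a request that looks for earlier cached searches mentioning a particular author, restricted to caches saved during the last week, might look like the sketch below; only `query` is required, and all values here are hypothetical.

    // Hypothetical input for jp_lit_search_cache_index; only `query` is required.
    const exampleInput = {
      query: "夏目漱石",        // matched against the cached query and item fields
      saved_on: "last_7_days", // shortcut filter on when the cache was saved
      limit: 20                // defaults to 50 (max 200) when omitted
    };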
## Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| query | Yes | The query that was searched for (echoed from the input). | |
| session_id | Yes | The applied session_id filter, or null. | |
| source | Yes | The applied source filter, or null. | |
| issued_from | Yes | The applied issued_from filter, or null. | |
| issued_to | Yes | The applied issued_to filter, or null. | |
| saved_on | Yes | The applied saved_on filter, or null. | |
| saved_on_resolved | Yes | The value that the saved_on shortcut resolved to, or null. | |
| saved_from | Yes | The applied saved_from filter, or null. | |
| saved_to | Yes | The applied saved_to filter, or null. | |
| total | Yes | Number of matching cache entries before the limit was applied. | |
| limit | Yes | The limit that was applied. | |
| cache_keys | Yes | Cache keys of the returned entries, usable for re-extraction. | |
| items | Yes | Matched entries: cache_key, session_ids, saved_at, source, query_preview, total, item_count, and matched_fields. | |
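A rough sketch of the corresponding response shape, with made-up values rather than real cache data:

    // Illustrative structuredContent returned by the tool; every value is made up.
    const exampleOutput = {
      query: "夏目漱石",
      session_id: null,
      source: null,
      issued_from: null,
      issued_to: null,
      saved_on: "last_7_days",
      saved_on_resolved: "2024-05-01", // hypothetical; actual format depends on resolveSavedDateFilter
      saved_from: null,
      saved_to: null,
      total: 1,
      limit: 20,
      cache_keys: ["a1b2c3"],          // hypothetical cache key, reusable for re-extraction
      items: [
        {
          cache_key: "a1b2c3",
          session_ids: ["2024-05-02-101530"],
          saved_at: "2024-05-02T10:15:30Z",
          source: null,
          query_preview: "夏目漱石 小説",
          total: 42,
          item_count: 20,
          matched_fields: ["query", "author"]
        }
      ]
    };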
## Implementation Reference
- The main handler factory `createJpLitSearchCacheIndexTool` that implements the tool logic. It maps cache keys to the sessions that recorded them, reads the `jp_lit_search` cache directories, filters entries by session, saved date, source, issue date, and text query, and returns the matched cache entries (a hedged sketch of the item-matching helper follows the code below).

      export function createJpLitSearchCacheIndexTool(
        cache: FileCache,
        sessions: SessionStore,
        baseDir = process.cwd()
      ) {
        return async (input: unknown) => {
          const parsed = searchCacheIndexInputSchema.parse(input);
          const normalizedQuery = normalizeText(parsed.query);
          const { effectiveSavedFrom, effectiveSavedTo, resolvedSavedOn } = resolveSavedDateFilter(parsed);
          const targetSessionIds = parsed.session_id ? new Set([parsed.session_id]) : null;
          const allSessions = parsed.session_id
            ? [await sessions.readById(parsed.session_id)]
            : await sessions.listAll();

          // Map each jp_lit_search cache_key to the sessions that recorded it.
          const cacheToSessionIds = new Map<string, Set<string>>();
          for (const session of allSessions) {
            for (const entry of session.entries) {
              if (entry.tool !== "jp_lit_search") {
                continue;
              }
              const set = cacheToSessionIds.get(entry.cache_key) ?? new Set<string>();
              set.add(session.session_id);
              cacheToSessionIds.set(entry.cache_key, set);
            }
          }

          // Collect cache keys from both the current and the legacy cache roots.
          const searchCacheDirs = [
            path.join(getCacheRoot(baseDir), "jp_lit_search"),
            path.join(getLegacyCacheRoot(baseDir), "jp_lit_search")
          ];
          const cacheKeys = Array.from(
            new Set(
              (
                await Promise.all(
                  searchCacheDirs.map(async (directory) => {
                    try {
                      return await readdir(directory);
                    } catch {
                      return [] as string[];
                    }
                  })
                )
              )
                .flat()
                .filter((filename) => filename.endsWith(".json"))
                .map((filename) => filename.replace(/\.json$/i, ""))
            )
          );

          const results: SearchCacheIndexOutput["items"] = [];
          for (const cacheKey of cacheKeys) {
            if (!cacheToSessionIds.has(cacheKey)) {
              continue;
            }
            const cached = await cache.read<SearchOutput>("jp_lit_search", cacheKey);
            if (!cached) {
              continue;
            }
            const output = cached.structured_content;

            // Filter by saved date and source.
            if (effectiveSavedFrom && cached.saved_at < effectiveSavedFrom) {
              continue;
            }
            if (effectiveSavedTo && cached.saved_at > effectiveSavedTo) {
              continue;
            }
            if (parsed.source && output.source !== parsed.source) {
              continue;
            }

            // Keep the cache only if at least one item falls inside the issued date range.
            const items = output.items;
            if (parsed.issued_from || parsed.issued_to) {
              const hasInRange = items.some((item) => {
                if (!item.issued_at) {
                  return false;
                }
                if (parsed.issued_from && item.issued_at < parsed.issued_from) {
                  return false;
                }
                if (parsed.issued_to && item.issued_at > parsed.issued_to) {
                  return false;
                }
                return true;
              });
              if (!hasInRange) {
                continue;
              }
            }

            // Record which fields the normalized query matched.
            const matchedFields = new Set<MatchedField>();
            if (typeof output.query === "string" && normalizeText(output.query).includes(normalizedQuery)) {
              matchedFields.add("query");
            }
            const itemMatched = matchItems(items, normalizedQuery);
            for (const field of itemMatched) {
              matchedFields.add(field);
            }
            if (matchedFields.size === 0) {
              continue;
            }

            const sessionIds = Array.from(cacheToSessionIds.get(cacheKey) ?? []).filter((sessionId) =>
              targetSessionIds ? targetSessionIds.has(sessionId) : true
            );
            if (sessionIds.length === 0) {
              continue;
            }
            results.push({
              cache_key: cacheKey,
              session_ids: sessionIds,
              saved_at: cached.saved_at,
              source: output.source,
              query_preview: createPreview(output.query),
              total: output.total,
              item_count: output.items.length,
              matched_fields: Array.from(matchedFields)
            });
          }

          // Newest first, then apply the limit.
          results.sort((left, right) => right.saved_at.localeCompare(left.saved_at));
          const limited = results.slice(0, parsed.limit);
          const structuredContent: SearchCacheIndexOutput = searchCacheIndexOutputSchema.parse({
            query: parsed.query,
            session_id: parsed.session_id ?? null,
            source: parsed.source ?? null,
            issued_from: parsed.issued_from ?? null,
            issued_to: parsed.issued_to ?? null,
            saved_on: parsed.saved_on ?? null,
            saved_on_resolved: resolvedSavedOn,
            saved_from: parsed.saved_from ?? null,
            saved_to: parsed.saved_to ?? null,
            total: results.length,
            limit: parsed.limit,
            cache_keys: limited.map((item) => item.cache_key),
            items: limited
          });
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify(structuredContent, null, 2)
              }
            ],
            structuredContent
          };
        };
      }
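The `normalizeText`, `resolveSavedDateFilter`, `createPreview`, and `matchItems` helpers are imported from elsewhere and are not part of this reference. As a minimal sketch of what an item-matching helper consistent with the `matched_fields` enum could look like (the item field names, the lowercase normalization, and the function shape are assumptions, not the actual implementation):

    // Hypothetical sketch of an item matcher; field names and normalization are assumed.
    type MatchedField = "query" | "title" | "author" | "subject" | "source_id";

    function sketchMatchItems(
      items: Array<{ title?: string; authors?: string[]; subjects?: string[]; source_id?: string }>,
      normalizedQuery: string
    ): Set<MatchedField> {
      const matched = new Set<MatchedField>();
      for (const item of items) {
        if (item.title?.toLowerCase().includes(normalizedQuery)) matched.add("title");
        if (item.authors?.some((author) => author.toLowerCase().includes(normalizedQuery))) matched.add("author");
        if (item.subjects?.some((subject) => subject.toLowerCase().includes(normalizedQuery))) matched.add("subject");
        if (item.source_id?.toLowerCase().includes(normalizedQuery)) matched.add("source_id");
      }
      return matched;
    }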
- `src/lib/schemas.ts:370-383` (schema): Input schema `searchCacheIndexInputSchema` defining the parameters: query (required), session_id, source, issued_from/to, saved_on/from/to, and limit (default 50, max 200).

      export const searchCacheIndexInputSchema = z.object({
        query: z.string().trim().min(1),
        session_id: z.string().trim().regex(/^\d{4}-\d{2}-\d{2}-\d{6}$/).optional(),
        source: sourceSchema.optional(),
        issued_from: z.string().optional(),
        issued_to: z.string().optional(),
        saved_on: z
          .string()
          .regex(/^(\d{4}-\d{2}-\d{2}|today|yesterday|last_7_days)$/)
          .optional(),
        saved_from: z.string().optional(),
        saved_to: z.string().optional(),
        limit: z.number().int().positive().max(200).default(50)
      });
- `src/lib/schemas.ts:385-412` (schema): Output schema `searchCacheIndexOutputSchema` defining the response shape: query, session_id, source, the date filters, total, limit, cache_keys, and an items array with the matched_fields enum.

      export const searchCacheIndexOutputSchema = z.object({
        query: z.string(),
        session_id: z.string().nullable(),
        source: sourceSchema.nullable(),
        issued_from: z.string().nullable(),
        issued_to: z.string().nullable(),
        saved_on: z.string().nullable(),
        saved_on_resolved: z.string().nullable(),
        saved_from: z.string().nullable(),
        saved_to: z.string().nullable(),
        total: z.number().int().nonnegative(),
        limit: z.number().int().positive(),
        cache_keys: z.array(z.string()),
        items: z.array(
          z.object({
            cache_key: z.string(),
            session_ids: z.array(z.string()),
            saved_at: z.string(),
            source: sourceSchema.nullable(),
            query_preview: z.string().nullable(),
            total: z.number().int().nonnegative(),
            item_count: z.number().int().nonnegative(),
            matched_fields: z.array(
              z.enum(["query", "title", "author", "subject", "source_id"])
            )
          })
        )
      });
- `src/server.ts:467-475` (registration): Registration of the tool as `jp_lit_search_cache_index` on the MCP server with its description, input/output schemas, and the handler function.

      server.registerTool(
        "jp_lit_search_cache_index",
        {
          description: "保存済み jp_lit_search キャッシュを横断検索し、再抽出に使える cache_key 一覧を返す",
          inputSchema: searchCacheIndexInputSchema,
          outputSchema: searchCacheIndexOutputSchema
        },
        searchCacheIndexTool
      );
- `src/server.ts:314` (registration): Instantiation of the tool handler via `createJpLitSearchCacheIndexTool(cache, sessions)`.

      const searchCacheIndexTool = createJpLitSearchCacheIndexTool(cache, sessions);
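Putting the pieces together, the factory returns an async handler that can also be invoked directly with a raw input object. A usage sketch, assuming the `cache: FileCache` and `sessions: SessionStore` instances are constructed elsewhere in `src/server.ts`:

    // Usage sketch: build the handler and call it directly (instances assumed to exist).
    const handler = createJpLitSearchCacheIndexTool(cache, sessions);

    async function listRecentCacheKeys(): Promise<string[]> {
      const result = await handler({ query: "漱石", saved_on: "today" });
      // The returned cache_keys can be fed back into a re-extraction step.
      return result.structuredContent.cache_keys;
    }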