import pLimit from "p-limit";
import { DEFAULT_CONTAINER_MAP_CONCURRENCY } from "../constants.js";
import type { DockerRawContainerInfo, HostConfig } from "../types.js";
import { logError } from "../utils/errors.js";
import { LRUCacheLayer } from "./cache-layer.js";
import type { IDockerClientProvider } from "./interfaces.js";
/**
 * Container to host mapping entry with multiple index keys.
 * Supports lookup by full ID, short ID (12 chars), and container name.
 *
 * @internal This is an internal cache structure; external callers should use findContainerHost()
 */
export interface ContainerHostMapping {
  // Full container ID as reported by the Docker API (`container.Id`).
  containerId: string;
  // First 12 characters of the container ID (`container.Id.slice(0, 12)`).
  shortId: string;
  // Primary container name with the leading "/" stripped; falls back to the
  // short ID when the Docker API reports no names.
  containerName: string;
  // `name` of the host config this container was found on.
  hostName: string;
  // Host configuration of the owning Docker host (kept so lookups can return it directly).
  host: HostConfig;
  // Raw container info exactly as returned by `listContainers({ all: true })`.
  container: DockerRawContainerInfo;
  // Epoch milliseconds captured at the start of the host scan that produced this entry.
  timestamp: number;
}
/**
 * Cache for container-to-host mappings.
 * Optimizes findContainerHost() by eliminating N+1 queries.
 *
 * Key features:
 * - Parallel host scanning with configurable concurrency
 * - Multiple index keys per container (ID, name, ID prefix)
 * - TTL-based expiration (default: 5 minutes)
 * - Automatic cache rebuild on miss
 * - LRU eviction policy
 *
 * Note: each container occupies up to 3 cache keys (full ID, short ID, name),
 * so `maxSize` bounds roughly maxSize/3 distinct containers.
 *
 * @example
 * ```typescript
 * const cache = new ContainerHostMapCache();
 * await cache.buildMapping(hosts, dockerService);
 *
 * const result = await cache.findContainerHost("my-container", hosts, dockerService);
 * // Returns { host, container } immediately without scanning all hosts
 * ```
 */
export class ContainerHostMapCache {
  private readonly cache: LRUCacheLayer<string, ContainerHostMapping>;
  private readonly concurrency: number;

  /**
   * Create a new container-to-host mapping cache.
   *
   * @param ttlMs - Time-to-live for cache entries in milliseconds (default: 5 minutes)
   * @param maxSize - Maximum number of cache entries (default: 500)
   * @param concurrency - Number of parallel host scans (default from constants)
   */
  constructor(
    ttlMs: number = 5 * 60 * 1000,
    maxSize: number = 500,
    concurrency: number = DEFAULT_CONTAINER_MAP_CONCURRENCY
  ) {
    this.cache = new LRUCacheLayer(maxSize, ttlMs);
    this.concurrency = concurrency;
  }

  /**
   * Build container-to-host mapping for all hosts in parallel.
   * Uses concurrency control to avoid overwhelming the network.
   * Per-host failures are logged inside scanSingleHost and do not reject.
   *
   * @param hosts - List of hosts to scan
   * @param dockerService - Docker service to query containers
   * @returns Promise that resolves when mapping is complete
   */
  async buildMapping(hosts: HostConfig[], dockerService: IDockerClientProvider): Promise<void> {
    const limit = pLimit(this.concurrency);
    // Query each host in parallel with concurrency limit
    await Promise.all(hosts.map((host) => limit(() => this.scanSingleHost(host, dockerService))));
  }

  /**
   * Scan a single host, cache all of its containers, and return the mapping
   * for the target container if it lives on this host.
   *
   * Returning the mapping directly (rather than a found-flag that forces the
   * caller to re-read the cache) makes lookups independent of cache retention:
   * since each container writes 3 keys, a host with more than ~maxSize/3
   * containers could evict the target's freshly-written entry before the
   * caller re-read it, producing a false "not found".
   *
   * @param host - Host to scan
   * @param dockerService - Docker service to query containers
   * @param targetContainerId - Optional container ID/name to search for
   * @returns The target's mapping if found on this host, otherwise null
   */
  private async scanSingleHost(
    host: HostConfig,
    dockerService: IDockerClientProvider,
    targetContainerId?: string
  ): Promise<ContainerHostMapping | null> {
    // Single timestamp for the whole scan so all entries from one host age together.
    const timestamp = Date.now();
    try {
      const docker = await dockerService.getDockerClient(host);
      const rawContainers = await docker.listContainers({ all: true });
      let match: ContainerHostMapping | null = null;
      for (const container of rawContainers) {
        const shortId = container.Id.slice(0, 12);
        const mapping: ContainerHostMapping = {
          containerId: container.Id,
          shortId,
          // Docker reports names with a leading "/"; fall back to the short ID
          // when no name is present.
          containerName: container.Names[0]?.replace(/^\//, "") || shortId,
          hostName: host.name,
          host,
          container,
          timestamp,
        };
        // Index under all three lookup keys (full ID, name, short ID).
        this.cache.set(container.Id, mapping);
        this.cache.set(mapping.containerName, mapping);
        this.cache.set(mapping.shortId, mapping);
        if (
          targetContainerId &&
          (container.Id === targetContainerId ||
            mapping.containerName === targetContainerId ||
            mapping.shortId === targetContainerId)
        ) {
          match = mapping;
          // Keep iterating so the rest of the host's containers are still cached.
        }
      }
      return match;
    } catch (error) {
      // Best-effort: an unreachable host must not abort the overall scan.
      logError(error, {
        operation: "ContainerHostMapCache:scanHost",
        metadata: { host: host.name },
      });
      return null;
    }
  }

  /**
   * Find which host a container is on, using cache with fallback.
   * Supports full ID, short ID, or container name lookups.
   *
   * On cache miss, scans hosts incrementally (one at a time) instead of
   * rebuilding the entire mapping. Stops as soon as the container is found,
   * avoiding unnecessary network calls to remaining hosts.
   *
   * @param containerId - Container ID (full/short) or name
   * @param hosts - Array of host configurations to search
   * @param dockerService - Docker service for fallback queries
   * @returns Host config and container info if found, null otherwise
   */
  async findContainerHost(
    containerId: string,
    hosts: HostConfig[],
    dockerService: IDockerClientProvider
  ): Promise<{ host: HostConfig; container: DockerRawContainerInfo } | null> {
    // Try cache lookup first
    const cached = this.cache.get(containerId);
    if (cached) {
      // Cache hit - log for observability (stderr keeps stdout clean for protocol traffic)
      console.error(`[ContainerHostMapCache] HIT: ${containerId}`);
      return { host: cached.host, container: cached.container };
    }
    // Cache miss - scan hosts incrementally instead of full rebuild
    console.error(`[ContainerHostMapCache] MISS: ${containerId} - scanning incrementally`);
    for (const host of hosts) {
      const match = await this.scanSingleHost(host, dockerService, containerId);
      if (match) {
        console.error(
          `[ContainerHostMapCache] Found ${containerId} on ${host.name} (scanned incrementally)`
        );
        return { host: match.host, container: match.container };
      }
    }
    return null;
  }

  /**
   * Invalidate entire cache after container lifecycle changes.
   * Call this after container state changes (start, stop, pause, resume, remove, create).
   *
   * Note: Clears the ENTIRE cache for all hosts. This is simpler than selective
   * per-host invalidation and the cache will rebuild automatically on next query.
   * For production use with many hosts, consider implementing per-host invalidation.
   */
  invalidateCache(): void {
    const stats = this.cache.getStats();
    console.error(
      `[ContainerHostMapCache] Invalidating entire cache (current size: ${stats.size})`
    );
    // Clear entire cache - simpler than selective removal
    // Cache will rebuild on next query
    this.cache.clear();
  }

  /**
   * Warm cache by pre-populating with all containers on startup.
   * Reduces initial query latency.
   *
   * @param hosts - Array of host configurations
   * @param dockerService - Docker service for API calls
   */
  async warmCache(hosts: HostConfig[], dockerService: IDockerClientProvider): Promise<void> {
    console.error("[ContainerHostMapCache] Warming cache...");
    const start = Date.now();
    await this.buildMapping(hosts, dockerService);
    const stats = this.cache.getStats();
    const elapsed = Date.now() - start;
    console.error(`[ContainerHostMapCache] Cache warmed: ${stats.size} entries in ${elapsed}ms`);
  }

  /**
   * Get cache statistics for monitoring and debugging.
   * Returns metrics on hits, misses, size, and evictions.
   *
   * @returns Cache statistics object
   */
  getStats(): { hits: number; misses: number; size: number; maxSize: number; evictions: number } {
    return this.cache.getStats();
  }

  /**
   * Log cache statistics to console.error for observability.
   * Outputs formatted stats with hit rate calculation.
   */
  logStats(): void {
    const stats = this.getStats();
    const total = stats.hits + stats.misses;
    // Guard against 0/0 -> NaN when no lookups have happened yet.
    const hitRate = total > 0 ? ((stats.hits / total) * 100).toFixed(2) : "0.00";
    console.error(`[ContainerHostMapCache] Stats:
  - Hits: ${stats.hits}
  - Misses: ${stats.misses}
  - Hit Rate: ${hitRate}%
  - Size: ${stats.size}/${stats.maxSize}
  - Evictions: ${stats.evictions}`);
  }
}