Skip to main content
Glama
cpu-throttling.test.js (8.89 kB)
/**
 * Resource tests for CPU throttling.
 *
 * Verifies BATCH_DELAY_MS enforcement between embedding batches, rate
 * limiting under fake timers, throttling overhead, event-loop yielding,
 * and an adaptive-delay concept demonstration.
 */
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'
import { createEmbeddingMock, BATCH_SIZE, BATCH_DELAY_MS } from '../helpers/indexing-mocks.js'
import { generateTestEmails } from '../helpers/test-data-generators.js'
import { measureTime, wait } from '../helpers/performance-utils.js'

describe('CPU Throttling', () => {
  let mockEmbedder

  beforeEach(() => {
    vi.clearAllMocks()
    const embedding = createEmbeddingMock()
    mockEmbedder = embedding.mockEmbedder
  })

  describe('BATCH_DELAY_MS verification', () => {
    it('should have BATCH_DELAY_MS = 100', () => {
      expect(BATCH_DELAY_MS).toBe(100)
    })

    it('should apply delay between batches', async () => {
      const count = 64 // 2 batches
      const texts = generateTestEmails(count).map(e => e.content)
      const batchTimes = []

      for (let i = 0; i < texts.length; i += BATCH_SIZE) {
        const batch = texts.slice(i, i + BATCH_SIZE)
        const batchIndex = Math.floor(i / BATCH_SIZE)
        // Apply delay before all but the first batch
        if (batchIndex > 0) {
          await wait(BATCH_DELAY_MS)
        }
        batchTimes.push(performance.now())
        await mockEmbedder(batch, { pooling: 'mean', normalize: true })
      }

      // Check that batch starts are spaced by at least BATCH_DELAY_MS
      for (let i = 1; i < batchTimes.length; i++) {
        const gap = batchTimes[i] - batchTimes[i - 1]
        expect(gap).toBeGreaterThanOrEqual(BATCH_DELAY_MS * 0.9) // 10% tolerance for timer jitter
      }
    })

    it('should not apply delay after last batch', async () => {
      // Exactly 1 batch, so the "delay between batches" branch must never run
      const texts = generateTestEmails(BATCH_SIZE).map(e => e.content)
      let delayCount = 0

      for (let i = 0; i < texts.length; i += BATCH_SIZE) {
        const batch = texts.slice(i, i + BATCH_SIZE)
        const isLastBatch = i + BATCH_SIZE >= texts.length
        await mockEmbedder(batch, { pooling: 'mean', normalize: true })
        if (!isLastBatch) {
          await wait(BATCH_DELAY_MS)
          delayCount++
        }
      }

      // No delay should be applied for a single batch
      expect(delayCount).toBe(0)
    })
  })

  describe('rate limiting with fake timers', () => {
    beforeEach(() => {
      vi.useFakeTimers()
    })

    afterEach(() => {
      vi.useRealTimers()
    })

    it('should respect batch delays with fake timers', async () => {
      const count = 96 // 3 batches
      const texts = generateTestEmails(count).map(e => e.content)
      const batchStartTimes = []

      const processBatch = async (batch, batchIndex) => {
        if (batchIndex > 0) {
          // Create the pending timeout first, then advance the fake clock
          // so the promise can resolve without real waiting.
          const delayPromise = new Promise(r => setTimeout(r, BATCH_DELAY_MS))
          vi.advanceTimersByTime(BATCH_DELAY_MS)
          await delayPromise
        }
        batchStartTimes.push(Date.now())
        await mockEmbedder(batch, { pooling: 'mean', normalize: true })
      }

      for (let i = 0; i < texts.length; i += BATCH_SIZE) {
        const batch = texts.slice(i, i + BATCH_SIZE)
        const batchIndex = Math.floor(i / BATCH_SIZE)
        await processBatch(batch, batchIndex)
      }

      // Verify correct number of batches processed
      expect(batchStartTimes.length).toBe(3)
    })

    it('should accumulate correct total delay time', async () => {
      const batchCount = 5
      const count = batchCount * BATCH_SIZE
      const texts = generateTestEmails(count).map(e => e.content)
      let totalDelayTime = 0

      for (let i = 0; i < texts.length; i += BATCH_SIZE) {
        const batch = texts.slice(i, i + BATCH_SIZE)
        const batchIndex = Math.floor(i / BATCH_SIZE)
        if (batchIndex > 0) {
          totalDelayTime += BATCH_DELAY_MS
          vi.advanceTimersByTime(BATCH_DELAY_MS)
        }
        await mockEmbedder(batch, { pooling: 'mean', normalize: true })
      }

      // Total delay should be (batchCount - 1) * BATCH_DELAY_MS:
      // one delay per gap between consecutive batches.
      const expectedDelay = (batchCount - 1) * BATCH_DELAY_MS
      expect(totalDelayTime).toBe(expectedDelay)
    })
  })

  describe('throttled processing simulation', () => {
    it('should add overhead from throttling', async () => {
      const count = 96 // 3 batches
      const texts = generateTestEmails(count).map(e => e.content)

      // Without throttling
      const { duration: unthrottled } = await measureTime(async () => {
        for (let i = 0; i < texts.length; i += BATCH_SIZE) {
          const batch = texts.slice(i, i + BATCH_SIZE)
          await mockEmbedder(batch, { pooling: 'mean', normalize: true })
        }
      })

      // With throttling
      const { duration: throttled } = await measureTime(async () => {
        for (let i = 0; i < texts.length; i += BATCH_SIZE) {
          const batch = texts.slice(i, i + BATCH_SIZE)
          const batchIndex = Math.floor(i / BATCH_SIZE)
          if (batchIndex > 0) {
            await wait(BATCH_DELAY_MS)
          }
          await mockEmbedder(batch, { pooling: 'mean', normalize: true })
        }
      })

      const expectedMinOverhead = (Math.ceil(count / BATCH_SIZE) - 1) * BATCH_DELAY_MS
      const actualOverhead = throttled - unthrottled

      console.log(`Unthrottled: ${unthrottled.toFixed(1)}ms`)
      console.log(`Throttled: ${throttled.toFixed(1)}ms`)
      console.log(`Expected min overhead: ${expectedMinOverhead}ms`)
      console.log(`Actual overhead: ${actualOverhead.toFixed(1)}ms`)

      // Throttled should be slower by approximately the delay time
      expect(actualOverhead).toBeGreaterThan(expectedMinOverhead * 0.8)
    })
  })

  describe('batch interval tracking', () => {
    it('should track intervals between batches', async () => {
      const count = 128 // 4 batches
      const texts = generateTestEmails(count).map(e => e.content)
      const batchEndTimes = []

      for (let i = 0; i < texts.length; i += BATCH_SIZE) {
        const batch = texts.slice(i, i + BATCH_SIZE)
        await mockEmbedder(batch, { pooling: 'mean', normalize: true })

        // Record time after processing (before delay)
        batchEndTimes.push(performance.now())

        // Apply delay between batches
        const isLastBatch = i + BATCH_SIZE >= texts.length
        if (!isLastBatch) {
          await wait(BATCH_DELAY_MS)
        }
      }

      // Should have 4 batch end times
      expect(batchEndTimes.length).toBe(4)

      // Calculate actual elapsed time between batches
      const intervals = []
      for (let i = 1; i < batchEndTimes.length; i++) {
        intervals.push(batchEndTimes[i] - batchEndTimes[i - 1])
      }

      // Each interval should include at least the delay time
      for (const interval of intervals) {
        expect(interval).toBeGreaterThanOrEqual(BATCH_DELAY_MS * 0.8)
      }
    })
  })

  describe('yield to event loop', () => {
    it('should allow other operations during delays', async () => {
      const count = 64 // 2 batches
      const texts = generateTestEmails(count).map(e => e.content)
      let otherWorkDone = false

      // Start batch processing
      const batchPromise = (async () => {
        for (let i = 0; i < texts.length; i += BATCH_SIZE) {
          const batch = texts.slice(i, i + BATCH_SIZE)
          const batchIndex = Math.floor(i / BATCH_SIZE)
          if (batchIndex > 0) {
            await wait(BATCH_DELAY_MS)
          }
          await mockEmbedder(batch, { pooling: 'mean', normalize: true })
        }
      })()

      // Try to do other work concurrently
      const otherWork = (async () => {
        await wait(50) // Small delay
        otherWorkDone = true
      })()

      await Promise.all([batchPromise, otherWork])
      expect(otherWorkDone).toBe(true)
    })
  })

  describe('adaptive throttling simulation', () => {
    it('should demonstrate adaptive delay concept', async () => {
      // This test demonstrates how adaptive throttling could work
      const count = 128
      const texts = generateTestEmails(count).map(e => e.content)
      const baseDelay = BATCH_DELAY_MS
      const delaysUsed = []

      for (let i = 0; i < texts.length; i += BATCH_SIZE) {
        const batch = texts.slice(i, i + BATCH_SIZE)
        const batchIndex = Math.floor(i / BATCH_SIZE)
        if (batchIndex > 0) {
          delaysUsed.push(baseDelay)
          await wait(baseDelay)
        }
        await mockEmbedder(batch, { pooling: 'mean', normalize: true })
      }

      // Verify all delays were applied: one per gap between batches
      expect(delaysUsed.length).toBe(Math.ceil(count / BATCH_SIZE) - 1)

      // All delays should be the base delay (no adaptive changes in this test)
      expect(delaysUsed.every(d => d === BATCH_DELAY_MS)).toBe(true)
    })
  })
})

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/sfls1397/Apple-Tools-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server