All information about MCP servers is available via the MCP API:
curl -X GET 'https://glama.ai/api/mcp/v1/servers/kunwarVivek/mcp-github-project-manager'
If you have feedback or need assistance with the MCP directory API, please join our Discord server.
---
phase: 04-test-stabilization
plan: 04
type: execute
wave: 2
depends_on: [04-01, 04-02]
files_modified:
- src/__tests__/unit/context/DependencyContextGenerator.test.ts
- src/__tests__/unit/validation/ContextQualityValidator.test.ts
autonomous: true
must_haves:
truths:
- "DependencyContextGenerator has unit tests covering all public methods"
- "ContextQualityValidator has unit tests covering all validation logic"
- "Both services have AI-available and fallback paths tested"
- "Coverage on both services is 80%+"
artifacts:
- path: "src/__tests__/unit/context/DependencyContextGenerator.test.ts"
provides: "Unit tests for DependencyContextGenerator"
min_lines: 150
- path: "src/__tests__/unit/validation/ContextQualityValidator.test.ts"
provides: "Unit tests for ContextQualityValidator"
min_lines: 150
key_links:
- from: "test files"
to: "source services"
via: "import statements"
pattern: "import.*from.*services"
---
<objective>
Add unit tests for DependencyContextGenerator and ContextQualityValidator services.
Purpose: These services (485 and 486 lines respectively) currently have zero test coverage. They handle dependency analysis and context quality validation. Per DEBT-17 and DEBT-18, these need 80%+ coverage.
Output: Two new test files with comprehensive coverage of both services.
</objective>
<execution_context>
@/Users/vivek/.claude/get-shit-done/workflows/execute-plan.md
@/Users/vivek/.claude/get-shit-done/templates/summary.md
</execution_context>
<context>
@.planning/PROJECT.md
@.planning/ROADMAP.md
@.planning/STATE.md
@.planning/phases/04-test-stabilization/04-RESEARCH.md
</context>
<tasks>
<task type="auto">
<name>Task 1: Create DependencyContextGenerator tests</name>
<files>src/__tests__/unit/context/DependencyContextGenerator.test.ts</files>
<action>
Create comprehensive tests for DependencyContextGenerator.
First, read src/services/context/DependencyContextGenerator.ts to understand:
1. Public methods and their signatures
2. Dependencies (AIServiceFactory, etc.)
3. Return types and data structures
Create the test file:
```typescript
// Unit tests for DependencyContextGenerator (DEBT-17: bring coverage to 80%+).
// NOTE(review): this template assumes generateDependencyContext(task, allTasks)
// is the primary public method and that it returns an object with a
// directDependencies array — confirm against the actual service and expand.
import { describe, it, expect, beforeEach, jest } from '@jest/globals';
import { DependencyContextGenerator } from '../../../services/context/DependencyContextGenerator';
import { AIServiceFactory } from '../../../services/ai/AIServiceFactory';
import { AITask, TaskStatus, TaskPriority, TaskComplexity, TaskDependency } from '../../../domain/ai-types';
// Mock the AI factory and the 'ai' SDK so tests never hit a real model.
// jest.mock calls are hoisted above the imports by the jest transform, so the
// mocks are installed before the service module is loaded — keep them up here.
jest.mock('../../../services/ai/AIServiceFactory');
jest.mock('ai', () => ({
generateObject: jest.fn()
}));
describe('DependencyContextGenerator', () => {
let generator: DependencyContextGenerator;
let mockFactory: any;
// Task under test: depends on 'task-0', so it exercises the "has
// dependencies" code paths of the generator.
const mockTask: AITask = {
id: 'task-1',
title: 'Implement API endpoints',
description: 'Create REST API for user management',
status: TaskStatus.PENDING,
priority: TaskPriority.HIGH,
complexity: 5 as TaskComplexity,
estimatedHours: 8,
actualHours: 0,
aiGenerated: true,
subtasks: [],
dependencies: [
{ taskId: 'task-0', type: 'blocks', description: 'Needs database schema' }
],
acceptanceCriteria: [],
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
tags: ['api', 'backend']
};
// Upstream task referenced by mockTask.dependencies — already completed, so
// the generator can resolve the dependency within allTasks.
const mockDependentTask: AITask = {
id: 'task-0',
title: 'Design database schema',
description: 'Create user table schema',
status: TaskStatus.COMPLETED,
priority: TaskPriority.HIGH,
complexity: 3 as TaskComplexity,
estimatedHours: 4,
actualHours: 3,
aiGenerated: true,
subtasks: [],
dependencies: [],
acceptanceCriteria: [],
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
tags: ['database']
};
const allTasks = [mockDependentTask, mockTask];
beforeEach(() => {
// Reset all mock state between tests, then reinstall a factory whose
// getBestAvailableModel reports an AI model as available by default.
jest.clearAllMocks();
mockFactory = {
getBestAvailableModel: jest.fn().mockReturnValue({ modelId: 'test-model' }),
};
(AIServiceFactory.getInstance as jest.Mock).mockReturnValue(mockFactory);
// Construct AFTER the factory mock is wired up — the generator is presumed
// to resolve its AI model in the constructor (TODO confirm in source).
generator = new DependencyContextGenerator();
});
describe('generateDependencyContext', () => {
it('should generate dependency context with AI when available', async () => {
// Stub the mocked 'ai' SDK to return a well-formed context object.
const { generateObject } = require('ai');
generateObject.mockResolvedValue({
object: {
directDependencies: [{ taskId: 'task-0', relationship: 'blocks' }],
transitiveDependencies: [],
blockedBy: ['task-0'],
blocks: [],
criticalPath: false,
riskLevel: 'low'
}
});
const result = await generator.generateDependencyContext(mockTask, allTasks);
expect(result).toBeDefined();
expect(result?.directDependencies).toBeDefined();
});
it('should use fallback when AI is unavailable', async () => {
// No model available → the service must take its non-AI fallback path.
mockFactory.getBestAvailableModel.mockReturnValue(null);
generator = new DependencyContextGenerator();
const result = await generator.generateDependencyContext(mockTask, allTasks);
expect(result).toBeDefined();
// Fallback should analyze task.dependencies array directly
});
it('should handle task with no dependencies', async () => {
const independentTask = { ...mockTask, dependencies: [] };
const result = await generator.generateDependencyContext(independentTask, allTasks);
expect(result).toBeDefined();
expect(result?.directDependencies).toHaveLength(0);
});
it('should detect circular dependencies', async () => {
// Create circular dependency scenario
// a → b → a; the generator must terminate rather than recurse forever.
const taskA = { ...mockTask, id: 'a', dependencies: [{ taskId: 'b', type: 'blocks' as const }] };
const taskB = { ...mockTask, id: 'b', dependencies: [{ taskId: 'a', type: 'blocks' as const }] };
const result = await generator.generateDependencyContext(taskA, [taskA, taskB]);
// Should detect or handle circular dependency
expect(result).toBeDefined();
});
it('should handle missing dependent task', async () => {
// Task references a dependency that doesn't exist in allTasks
const result = await generator.generateDependencyContext(mockTask, [mockTask]);
expect(result).toBeDefined();
});
});
describe('error handling', () => {
it('should handle AI errors gracefully', async () => {
// An AI-side rejection must not propagate — the service is expected to
// fall back rather than throw (TODO confirm against source).
const { generateObject } = require('ai');
generateObject.mockRejectedValue(new Error('AI service error'));
const result = await generator.generateDependencyContext(mockTask, allTasks);
expect(result).toBeDefined();
});
it('should handle empty task list', async () => {
const result = await generator.generateDependencyContext(mockTask, []);
expect(result).toBeDefined();
});
});
});
```
Expand based on actual service methods discovered when reading the source file.
</action>
<verify>
Run tests:
```bash
npm test -- --testPathPattern="DependencyContextGenerator.test" --coverage --collectCoverageFrom="src/services/context/DependencyContextGenerator.ts" 2>&1 | grep -E "(PASS|FAIL|Stmts)"
```
Target: 80%+ coverage.
</verify>
<done>DependencyContextGenerator.test.ts exists with 80%+ coverage.</done>
</task>
<task type="auto">
<name>Task 2: Create ContextQualityValidator tests</name>
<files>src/__tests__/unit/validation/ContextQualityValidator.test.ts</files>
<action>
Create comprehensive tests for ContextQualityValidator.
First, ensure directory exists:
```bash
mkdir -p src/__tests__/unit/validation
```
Read src/services/validation/ContextQualityValidator.ts to understand:
1. Validation methods and their signatures
2. Quality metrics being validated
3. Threshold values and scoring logic
Create the test file:
```typescript
// Unit tests for ContextQualityValidator (DEBT-18: bring coverage to 80%+).
// NOTE(review): this template assumes validateContext() returns
// { isValid, score, missingFields } and validateMetrics() returns
// { meetsThreshold, issues } — verify both shapes against the actual service
// before expanding, and adjust the 0.8 score threshold to the real one.
import { describe, it, expect, beforeEach, jest } from '@jest/globals';
import { ContextQualityValidator } from '../../../services/validation/ContextQualityValidator';
import { TaskExecutionContext, ContextQualityMetrics } from '../../../domain/ai-types';
describe('ContextQualityValidator', () => {
let validator: ContextQualityValidator;
// A fully-populated context: every field present so it should score high.
const mockContext: TaskExecutionContext = {
businessObjective: 'Improve user authentication security',
userImpact: 'Users will have more secure login experience',
successMetrics: ['99% login success rate', 'Zero security breaches'],
parentFeature: {
id: 'f-1',
title: 'Authentication',
description: 'User authentication system',
userStories: ['As a user I can login securely'],
businessValue: 'Core security feature'
},
technicalConstraints: ['Must use OAuth 2.0', 'JWT tokens required'],
architecturalDecisions: ['Use bcrypt for password hashing'],
integrationPoints: ['Identity provider API', 'Session store'],
dataRequirements: ['User credentials', 'Session tokens'],
prdContextSummary: {
relevantObjectives: ['Secure authentication'],
relevantFeatures: ['Login', 'Registration'],
constraints: ['Mobile support required']
}
};
beforeEach(() => {
// Fresh validator per test; assumed to be stateless and cheap to construct.
validator = new ContextQualityValidator();
});
describe('validateContext', () => {
it('should validate a complete context as high quality', () => {
const result = validator.validateContext(mockContext);
expect(result).toBeDefined();
expect(result.isValid).toBe(true);
expect(result.score).toBeGreaterThan(0.8);
});
it('should identify missing required fields', () => {
// Blank out the two narrative fields that are presumably required.
const incompleteContext = {
...mockContext,
businessObjective: '',
userImpact: ''
};
const result = validator.validateContext(incompleteContext);
expect(result.isValid).toBe(false);
expect(result.missingFields).toContain('businessObjective');
});
it('should calculate quality score correctly', () => {
// Score is assumed to be normalized to [0, 1] — TODO confirm in source.
const result = validator.validateContext(mockContext);
expect(result.score).toBeGreaterThanOrEqual(0);
expect(result.score).toBeLessThanOrEqual(1);
});
it('should handle empty context', () => {
const emptyContext = {} as TaskExecutionContext;
const result = validator.validateContext(emptyContext);
expect(result.isValid).toBe(false);
expect(result.score).toBe(0);
});
});
describe('validateMetrics', () => {
it('should validate quality metrics', () => {
// All dimensions well above any plausible threshold.
const metrics: ContextQualityMetrics = {
completeness: 0.9,
relevance: 0.85,
specificity: 0.8,
actionability: 0.75,
traceability: 0.9,
generationTimeMs: 150,
tokenUsage: 500,
aiEnhanced: true,
errors: [],
warnings: []
};
const result = validator.validateMetrics(metrics);
expect(result).toBeDefined();
expect(result.meetsThreshold).toBe(true);
});
it('should flag low quality metrics', () => {
// All dimensions well below threshold plus recorded errors/warnings.
const lowMetrics: ContextQualityMetrics = {
completeness: 0.3,
relevance: 0.2,
specificity: 0.1,
actionability: 0.2,
traceability: 0.1,
generationTimeMs: 5000,
tokenUsage: 10000,
aiEnhanced: false,
errors: ['Generation failed'],
warnings: ['Fallback used']
};
const result = validator.validateMetrics(lowMetrics);
expect(result.meetsThreshold).toBe(false);
expect(result.issues.length).toBeGreaterThan(0);
});
});
describe('edge cases', () => {
it('should handle null values gracefully', () => {
// Deliberately violate the types to probe runtime defensiveness.
const contextWithNulls = {
...mockContext,
successMetrics: null as any,
technicalConstraints: undefined as any
};
const result = validator.validateContext(contextWithNulls);
// Should not throw
expect(result).toBeDefined();
});
it('should handle empty arrays', () => {
const contextWithEmptyArrays = {
...mockContext,
successMetrics: [],
technicalConstraints: [],
integrationPoints: []
};
const result = validator.validateContext(contextWithEmptyArrays);
expect(result).toBeDefined();
// Empty arrays might reduce score but shouldn't fail
});
});
});
```
Expand based on actual validator methods discovered when reading the source file.
</action>
<verify>
Run tests:
```bash
npm test -- --testPathPattern="ContextQualityValidator.test" --coverage --collectCoverageFrom="src/services/validation/ContextQualityValidator.ts" 2>&1 | grep -E "(PASS|FAIL|Stmts)"
```
Target: 80%+ coverage.
</verify>
<done>ContextQualityValidator.test.ts exists with 80%+ coverage.</done>
</task>
</tasks>
<verification>
Run all new context tests:
```bash
npm test -- --testPathPattern="(DependencyContextGenerator|ContextQualityValidator)" --coverage 2>&1 | tail -20
```
Expected:
- Both test files pass
- DependencyContextGenerator: 80%+ coverage
- ContextQualityValidator: 80%+ coverage
</verification>
<success_criteria>
1. DependencyContextGenerator.test.ts exists with 10+ test cases
2. ContextQualityValidator.test.ts exists with 10+ test cases
3. Both services have AI and fallback paths tested
4. Coverage metrics: 80%+ statements for both
5. All tests pass
</success_criteria>
<output>
After completion, create `.planning/phases/04-test-stabilization/04-04-SUMMARY.md`
</output>