# configure_output_masking
Configure content masking for MCP server outputs to protect sensitive data, using a `full`, `partial`, `placeholder`, or `environment` masking strategy.
## Instructions

Configure content masking for all MCP outputs.
## Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| `enabled` | No | Enable or disable output masking | |
| `strategy` | No | Masking strategy to use: `full`, `partial`, `placeholder`, or `environment` | |
| `customPatterns` | No | Custom patterns (regex strings) to mask | |
| `action` | No | Configuration action | `get` |
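For illustration, a client might pass arguments shaped like the object below. Field names follow the table above; the `strategy` values mirror the `MaskingConfig` interface shown under Implementation Reference, and any `action` value beyond the documented default is an assumption, so only `get` is shown.

```typescript
// Hypothetical arguments for configure_output_masking (illustrative values only).
const args = {
  enabled: true,
  strategy: 'partial', // 'full' | 'partial' | 'placeholder' | 'environment'
  customPatterns: ['internal-id-\\d+'], // assumed regex-string format
  action: 'get', // documented default; other actions are not listed in the source
};
```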
## Implementation Reference
- **src/tools/tool-catalog.ts:572-590 (registration)**: Registration of the `configure_output_masking` tool in the central `TOOL_CATALOG`, including metadata, input schema, and relationships to other tools.

  ```typescript
  TOOL_CATALOG.set('configure_output_masking', {
    name: 'configure_output_masking',
    shortDescription: 'Configure output masking settings',
    fullDescription: 'Configures global output masking settings and rules.',
    category: 'content-security',
    complexity: 'simple',
    tokenCost: { min: 200, max: 400 },
    hasCEMCPDirective: true, // Phase 4.3: Simple tool - settings configuration
    relatedTools: ['apply_basic_content_masking', 'configure_custom_patterns'],
    keywords: ['configure', 'output', 'masking', 'settings'],
    requiresAI: false,
    inputSchema: {
      type: 'object',
      properties: {
        enabled: { type: 'boolean' },
        level: { type: 'string', enum: ['minimal', 'standard', 'aggressive'] },
      },
    },
  });
  ```
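Since `TOOL_CATALOG` is a plain `Map` keyed by tool name, other components can read this metadata back out. A minimal sketch, assuming the catalog is exported from `src/tools/tool-catalog.ts` (the export path is an assumption):

```typescript
import { TOOL_CATALOG } from './tools/tool-catalog.js'; // assumed export

// Look up the registration shown above.
const entry = TOOL_CATALOG.get('configure_output_masking');
if (entry) {
  console.log(entry.category); // 'content-security'
  console.log(entry.relatedTools); // ['apply_basic_content_masking', 'configure_custom_patterns']
  console.log(entry.tokenCost); // { min: 200, max: 400 }
}
```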
- **src/utils/output-masking.ts:9-269 (helper)**: Core `MaskingConfig` interface and `createMaskingConfig` function, which directly correspond to the tool's purpose of configuring output masking settings; the catalog's `enabled`/`level` inputs map onto `enabled`/`strategy` here. Also provides `maskMcpResponse` and the `withContentMasking` middleware for applying the configuration.

  ```typescript
  /**
   * Configuration for output masking behavior
   */
  export interface MaskingConfig {
    /** Whether masking is enabled */
    enabled: boolean;
    /** Masking strategy to apply */
    strategy: 'full' | 'partial' | 'placeholder' | 'environment';
    /** Custom patterns to mask */
    customPatterns?: string[];
    /** Patterns to skip during masking */
    skipPatterns?: string[];
  }

  /**
   * Default masking configuration
   */
  const DEFAULT_MASKING_CONFIG: MaskingConfig = {
    enabled: true,
    strategy: 'partial',
    customPatterns: [],
    skipPatterns: [
      '[REDACTED]',
      '[API_KEY_REDACTED]',
      '[PASSWORD_REDACTED]',
      '[EMAIL_REDACTED]',
      '[IP_ADDRESS_REDACTED]',
    ],
  };

  /**
   * Apply content masking to MCP response content
   *
   * @param response - The MCP response to mask
   * @param config - Masking configuration to use
   * @returns Promise resolving to the masked response
   * @throws McpAdrError if masking fails
   */
  export async function maskMcpResponse(
    response: any,
    config: MaskingConfig = DEFAULT_MASKING_CONFIG
  ): Promise<any> {
    if (!config.enabled) {
      return response;
    }

    try {
      // Deep clone the response to avoid modifying the original
      const maskedResponse = JSON.parse(JSON.stringify(response));

      // Apply masking to different response types
      if (maskedResponse.content && Array.isArray(maskedResponse.content)) {
        // Tool response with content array
        for (const contentItem of maskedResponse.content) {
          if (contentItem.type === 'text' && contentItem.text) {
            contentItem.text = await maskContent(contentItem.text, config);
          }
        }
      } else if (maskedResponse.contents && Array.isArray(maskedResponse.contents)) {
        // Resource response with contents array
        for (const contentItem of maskedResponse.contents) {
          if (contentItem.text) {
            contentItem.text = await maskContent(contentItem.text, config);
          }
        }
      } else if (maskedResponse.messages && Array.isArray(maskedResponse.messages)) {
        // Prompt response with messages array
        for (const message of maskedResponse.messages) {
          if (message.content && message.content.text) {
            message.content.text = await maskContent(message.content.text, config);
          }
        }
      }

      return maskedResponse;
    } catch (error) {
      throw new McpAdrError(
        `Failed to mask MCP response: ${error instanceof Error ? error.message : String(error)}`,
        'MASKING_ERROR'
      );
    }
  }

  /**
   * Apply content masking to a text string
   */
  async function maskContent(content: string, config: MaskingConfig): Promise<string> {
    try {
      // Skip if content is already masked
      if (config.skipPatterns?.some(pattern => content.includes(pattern))) {
        return content;
      }

      // Apply basic masking patterns
      const { applyBasicMasking } = await import('./content-masking.js');
      const strategy = config.strategy === 'environment' ? 'placeholder' : config.strategy;
      return applyBasicMasking(content, strategy);
    } catch (error) {
      // If masking fails, return original content with warning
      // Log to stderr to avoid corrupting MCP protocol
      console.error('[WARN] Content masking failed:', error);
      return content;
    }
  }

  /**
   * Generate AI-powered masking for sensitive content
   */
  export async function generateAiMasking(
    content: string,
    contentType: 'code' | 'documentation' | 'configuration' | 'logs' | 'general' = 'general'
  ): Promise<{ maskedContent: string; analysisPrompt: string }> {
    try {
      const { generateSensitiveContentDetectionPrompt } = await import(
        '../prompts/security-prompts.js'
      );
      const analysisPrompt = generateSensitiveContentDetectionPrompt(content, contentType);

      // For now, apply basic masking as fallback
      const { applyBasicMasking } = await import('./content-masking.js');
      const maskedContent = applyBasicMasking(content, 'partial');

      return {
        maskedContent,
        analysisPrompt: `
  # AI-Powered Content Masking Available

  The following content has been processed with basic masking. For enhanced AI-powered masking, use the analysis prompt below:

  ## Basic Masked Content
  \`\`\`
  ${maskedContent}
  \`\`\`

  ## AI Analysis Prompt
  ${analysisPrompt}

  ## Instructions
  1. Submit the AI analysis prompt to detect sensitive information
  2. Use the results with the \`generate_content_masking\` tool for intelligent masking
  3. Apply the enhanced masking for better security
  `,
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to generate AI masking: ${error instanceof Error ? error.message : String(error)}`,
        'MASKING_ERROR'
      );
    }
  }

  /**
   * Create masking configuration from environment or defaults
   */
  export function createMaskingConfig(overrides?: Partial<MaskingConfig>): MaskingConfig {
    const envConfig: Partial<MaskingConfig> = {
      enabled: process.env['MCP_MASKING_ENABLED'] !== 'false',
      strategy: (process.env['MCP_MASKING_STRATEGY'] as any) || 'partial',
    };

    return {
      ...DEFAULT_MASKING_CONFIG,
      ...envConfig,
      ...overrides,
    };
  }

  /**
   * Validate masking configuration
   */
  export function validateMaskingConfig(config: MaskingConfig): {
    isValid: boolean;
    errors: string[];
  } {
    const errors: string[] = [];

    if (typeof config.enabled !== 'boolean') {
      errors.push('enabled must be a boolean');
    }

    if (!['full', 'partial', 'placeholder', 'environment'].includes(config.strategy)) {
      errors.push('strategy must be one of: full, partial, placeholder, environment');
    }

    if (config.customPatterns && !Array.isArray(config.customPatterns)) {
      errors.push('customPatterns must be an array');
    }

    if (config.skipPatterns && !Array.isArray(config.skipPatterns)) {
      errors.push('skipPatterns must be an array');
    }

    return {
      isValid: errors.length === 0,
      errors,
    };
  }

  /**
   * Middleware wrapper for MCP tool responses
   */
  export function withContentMasking<T extends (..._args: any[]) => Promise<any>>(
    toolFunction: T,
    config?: MaskingConfig
  ): T {
    return (async (...args: any[]) => {
      const response = await toolFunction(...args);
      const maskingConfig = config || createMaskingConfig();
      return await maskMcpResponse(response, maskingConfig);
    }) as T;
  }

  /**
   * Apply progressive masking based on content sensitivity
   */
  export async function applyProgressiveMasking(
    content: string,
    sensitivityLevel: 'low' | 'medium' | 'high' | 'critical' = 'medium'
  ): Promise<string> {
    const strategies: Record<string, 'full' | 'partial' | 'placeholder'> = {
      low: 'placeholder',
      medium: 'partial',
      high: 'full',
      critical: 'full',
    };

    const strategy = strategies[sensitivityLevel];
    const { applyBasicMasking } = await import('./content-masking.js');
    return applyBasicMasking(content, strategy);
  }

  /**
   * Detect content sensitivity level using heuristics
   */
  export function detectContentSensitivity(content: string): 'low' | 'medium' | 'high' | 'critical' {
    const criticalPatterns = [/password/gi, /secret/gi, /private.*key/gi, /api.*key/gi, /token/gi];

    const highPatterns = [
      /@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g, // emails
      /\b(?:\d{1,3}\.){3}\d{1,3}\b/g, // IP addresses
      /\b[A-Z0-9]{20,}\b/g, // potential keys/tokens
    ];

    const mediumPatterns = [/localhost/gi, /127\.0\.0\.1/g, /config/gi, /env/gi];

    if (criticalPatterns.some(pattern => pattern.test(content))) {
      return 'critical';
    }

    if (highPatterns.some(pattern => pattern.test(content))) {
      return 'high';
    }

    if (mediumPatterns.some(pattern => pattern.test(content))) {
      return 'medium';
    }

    return 'low';
  }
  ```
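A minimal sketch of how these helpers compose, using only the functions defined above; the import path and `someToolHandler` are illustrative assumptions:

```typescript
import {
  createMaskingConfig,
  maskMcpResponse,
  withContentMasking,
} from './utils/output-masking.js'; // assumed relative path

async function demo(): Promise<void> {
  // Merge env defaults (MCP_MASKING_ENABLED, MCP_MASKING_STRATEGY) with an override;
  // assumes MCP_MASKING_ENABLED is not set to 'false'.
  const config = createMaskingConfig({ strategy: 'full' });

  // Mask a tool-style response: each text item in `content` is rewritten.
  const masked = await maskMcpResponse(
    { content: [{ type: 'text', text: 'password=hunter2' }] },
    config
  );
  console.log(masked.content[0].text); // "password=[PASSWORD_REDACTED]"

  // Or wrap a handler so every response it returns is masked automatically.
  const someToolHandler = async () => ({ content: [{ type: 'text', text: 'ok' }] }); // hypothetical
  const safeHandler = withContentMasking(someToolHandler, config);
  await safeHandler();
}
```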
- **src/utils/content-masking.ts:1-316 (helper)**: Supporting utilities for content masking, including basic regex-based masking and validation functions used by the output masking system.

  ```typescript
  /**
   * Content masking utilities using prompt-driven AI analysis
   * Implements intelligent sensitive content detection and masking
   */

  import { McpAdrError } from '../types/index.js';

  export interface SensitiveItem {
    type: string;
    category: string;
    content: string;
    startPosition: number;
    endPosition: number;
    confidence: number;
    reasoning: string;
    severity: 'low' | 'medium' | 'high' | 'critical';
    suggestedMask: string;
  }

  export interface SensitiveContentAnalysis {
    hasSensitiveContent: boolean;
    detectedItems: SensitiveItem[];
    recommendations: string[];
    overallRisk: 'low' | 'medium' | 'high' | 'critical';
    summary: string;
  }

  export interface MaskingResult {
    maskedContent: string;
    maskingApplied: Array<{
      originalContent: string;
      maskedWith: string;
      position: string;
      reason: string;
    }>;
    preservedStructure: boolean;
    readabilityScore: number;
    securityScore: number;
    recommendations: string[];
  }

  export interface CustomPattern {
    name: string;
    description: string;
    regex: string;
    category: string;
    severity: 'low' | 'medium' | 'high' | 'critical';
    examples: string[];
    falsePositives: string[];
    maskingStrategy: 'full' | 'partial' | 'placeholder' | 'environment';
  }

  /**
   * Analyze content for sensitive information using AI prompts
   */
  export async function analyzeSensitiveContent(
    content: string,
    contentType: 'code' | 'documentation' | 'configuration' | 'logs' | 'general' = 'general',
    userDefinedPatterns?: string[]
  ): Promise<{ analysisPrompt: string; instructions: string }> {
    try {
      const { generateSensitiveContentDetectionPrompt } = await import(
        '../prompts/security-prompts.js'
      );
      const analysisPrompt = generateSensitiveContentDetectionPrompt(
        content,
        contentType,
        userDefinedPatterns
      );

      const instructions = `
  # Sensitive Content Analysis Instructions

  This analysis will help identify sensitive information that should be masked or redacted.

  ## Next Steps:
  1. **Review the generated prompt** for sensitive content detection
  2. **Submit the prompt to an AI agent** for analysis
  3. **Parse the JSON response** to get detected sensitive items
  4. **Apply appropriate masking** based on the results

  ## Expected AI Response Format:
  The AI will return a JSON object with:
  - \`hasSensitiveContent\`: boolean indicating if sensitive content was found
  - \`detectedItems\`: array of sensitive items with positions and severity
  - \`recommendations\`: security recommendations
  - \`overallRisk\`: risk assessment (low/medium/high/critical)

  ## Usage Example:
  \`\`\`typescript
  const result = await analyzeSensitiveContent(fileContent, 'code');
  // Submit result.analysisPrompt to AI agent
  // Parse AI response as SensitiveContentAnalysis
  \`\`\`
  `;

      return {
        analysisPrompt,
        instructions,
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to generate sensitive content analysis: ${error instanceof Error ? error.message : String(error)}`,
        'ANALYSIS_ERROR'
      );
    }
  }

  /**
   * Generate content masking prompt for AI processing
   */
  export async function generateMaskingInstructions(
    content: string,
    detectedSensitiveItems: SensitiveItem[],
    maskingStrategy: 'full' | 'partial' | 'placeholder' | 'environment' = 'full'
  ): Promise<{ maskingPrompt: string; instructions: string }> {
    try {
      const { generateContentMaskingPrompt } = await import('../prompts/security-prompts.js');
      const maskingPrompt = generateContentMaskingPrompt(
        content,
        detectedSensitiveItems,
        maskingStrategy
      );

      const instructions = `
  # Content Masking Instructions

  This will apply intelligent masking to content based on detected sensitive information.

  ## Masking Strategy: ${maskingStrategy}

  ## Next Steps:
  1. **Review the generated masking prompt**
  2. **Submit to AI agent** for intelligent masking
  3. **Parse the JSON response** to get masked content
  4. **Validate the results** for security and usability

  ## Expected AI Response Format:
  The AI will return a JSON object with:
  - \`maskedContent\`: the content with sensitive information masked
  - \`maskingApplied\`: details of what was masked and how
  - \`readabilityScore\`: how readable the masked content remains
  - \`securityScore\`: how secure the masking is

  ## Usage Example:
  \`\`\`typescript
  const result = await generateMaskingInstructions(content, sensitiveItems, 'partial');
  // Submit result.maskingPrompt to AI agent
  // Parse AI response as MaskingResult
  \`\`\`
  `;

      return {
        maskingPrompt,
        instructions,
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to generate masking instructions: ${error instanceof Error ? error.message : String(error)}`,
        'MASKING_ERROR'
      );
    }
  }

  /**
   * Generate custom pattern configuration prompt
   */
  export async function generateCustomPatternConfiguration(
    projectContext: string,
    existingPatterns?: string[]
  ): Promise<{ configurationPrompt: string; instructions: string }> {
    try {
      const { generateCustomPatternConfigurationPrompt } = await import(
        '../prompts/security-prompts.js'
      );
      const configurationPrompt = generateCustomPatternConfigurationPrompt(
        projectContext,
        existingPatterns
      );

      const instructions = `
  # Custom Pattern Configuration Instructions

  This will help configure project-specific sensitive information patterns.

  ## Next Steps:
  1. **Review the generated configuration prompt**
  2. **Submit to AI agent** for pattern recommendations
  3. **Parse the JSON response** to get custom patterns
  4. **Integrate patterns** into the detection system

  ## Expected AI Response Format:
  The AI will return a JSON object with:
  - \`customPatterns\`: array of project-specific patterns
  - \`recommendations\`: additional security recommendations
  - \`integrationNotes\`: notes on pattern integration

  ## Usage Example:
  \`\`\`typescript
  const result = await generateCustomPatternConfiguration(projectInfo);
  // Submit result.configurationPrompt to AI agent
  // Parse AI response to get CustomPattern[]
  \`\`\`
  `;

      return {
        configurationPrompt,
        instructions,
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to generate pattern configuration: ${error instanceof Error ? error.message : String(error)}`,
        'CONFIGURATION_ERROR'
      );
    }
  }

  /**
   * Apply basic masking patterns (fallback when AI is not available)
   */
  export function applyBasicMasking(
    content: string,
    maskingStrategy: 'full' | 'partial' | 'placeholder' = 'full'
  ): string {
    // Basic patterns for common sensitive information
    const patterns = [
      // API Keys
      {
        pattern: /sk-[a-zA-Z0-9]{32,}/g,
        replacement: maskingStrategy === 'partial' ? 'sk-...****' : '[API_KEY_REDACTED]',
      },
      {
        pattern: /ghp_[a-zA-Z0-9]{36}/g,
        replacement: maskingStrategy === 'partial' ? 'ghp_...****' : '[GITHUB_TOKEN_REDACTED]',
      },
      // AWS Keys
      {
        pattern: /AKIA[0-9A-Z]{16}/g,
        replacement: maskingStrategy === 'partial' ? 'AKIA...****' : '[AWS_ACCESS_KEY_REDACTED]',
      },
      // Email addresses
      {
        pattern: /\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b/g,
        replacement: maskingStrategy === 'partial' ? '***@***.***' : '[EMAIL_REDACTED]',
      },
      // IP Addresses (private ranges)
      {
        pattern: /\b(?:10\.|172\.(?:1[6-9]|2[0-9]|3[01])\.|192\.168\.)\d{1,3}\.\d{1,3}\b/g,
        replacement: '[IP_ADDRESS_REDACTED]',
      },
      // Common password patterns
      {
        pattern: /password\s*[:=]\s*["']?[^"'\s]+["']?/gi,
        replacement: 'password=[PASSWORD_REDACTED]',
      },
    ];

    let maskedContent = content;
    for (const { pattern, replacement } of patterns) {
      maskedContent = maskedContent.replace(pattern, replacement);
    }

    return maskedContent;
  }

  /**
   * Validate that content has been properly masked
   */
  export function validateMasking(
    originalContent: string,
    maskedContent: string
  ): {
    isValid: boolean;
    issues: string[];
    securityScore: number;
  } {
    const issues: string[] = [];
    let securityScore = 1.0;

    // Check for common patterns that should have been masked
    const sensitivePatterns = [
      /sk-[a-zA-Z0-9]{32,}/g,
      /ghp_[a-zA-Z0-9]{36}/g,
      /AKIA[0-9A-Z]{16}/g,
      /password\s*[:=]\s*["']?[^"'\s\\[\\]]+["']?/gi,
    ];

    for (const pattern of sensitivePatterns) {
      const matches = maskedContent.match(pattern);
      if (matches) {
        issues.push(`Potential unmasked sensitive content found: ${matches[0].substring(0, 10)}...`);
        securityScore -= 0.2;
      }
    }

    // Check that masking was actually applied
    if (originalContent === maskedContent) {
      issues.push('No masking appears to have been applied');
      securityScore = 0;
    }

    return {
      isValid: issues.length === 0,
      issues,
      securityScore: Math.max(0, securityScore),
    };
  }
  ```
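A short sketch of the regex fallback path, using only `applyBasicMasking` and `validateMasking` as defined above; the sample strings and import path are illustrative:

```typescript
import { applyBasicMasking, validateMasking } from './utils/content-masking.js'; // assumed path

const original = 'Contact admin@example.com, key AKIAABCDEFGHIJKLMNOP';

// 'partial' keeps recognizable prefixes instead of full placeholders:
// emails become '***@***.***' and AWS access keys become 'AKIA...****'.
const masked = applyBasicMasking(original, 'partial');

// Confirm no known sensitive patterns survived the rewrite.
const { isValid, securityScore, issues } = validateMasking(original, masked);
console.log(isValid, securityScore, issues); // true 1 []
```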
- **configure_custom_patterns (related tool handler)**: Related configuration tool for custom masking patterns, listed as a `relatedTools` entry in the catalog; part of the content-security tool suite.

  ```typescript
  export async function configureCustomPatterns(args: {
    projectPath: string;
    existingPatterns?: string[];
  }): Promise<any> {
    const { projectPath, existingPatterns } = args;

    try {
      // Use actual file operations to scan project structure
      const { scanProjectStructure } = await import('../utils/actual-file-operations.js');

      // Actually scan project structure
      const projectStructure = await scanProjectStructure(projectPath, {
        readContent: true,
        maxFileSize: 10000,
      });

      const customPatternPrompt = `
  # Custom Pattern Configuration Generation

  Based on actual project structure analysis, here are the findings:

  ## Project Structure
  - **Root Path**: ${projectStructure.rootPath}
  - **Total Files**: ${projectStructure.totalFiles}
  - **Directories**: ${projectStructure.directories.join(', ')}

  ## Package Management Files
  ${
    projectStructure.packageFiles.length > 0
      ? projectStructure.packageFiles
          .map(
            f => `
  ### ${f.filename}
  \`\`\`
  ${f.content.slice(0, 500)}${f.content.length > 500 ? '\n... (truncated)' : ''}
  \`\`\`
  `
          )
          .join('\n')
      : '- No package files found'
  }

  ## Environment Configuration Files
  ${
    projectStructure.environmentFiles.length > 0
      ? projectStructure.environmentFiles
          .map(
            f => `
  ### ${f.filename}
  \`\`\`
  ${f.content.slice(0, 300)}${f.content.length > 300 ? '\n... (truncated)' : ''}
  \`\`\`
  `
          )
          .join('\n')
      : '- No environment files found'
  }

  ## Configuration Files
  ${
    projectStructure.configFiles.length > 0
      ? projectStructure.configFiles
          .map(
            f => `
  ### ${f.filename}
  \`\`\`
  ${f.content.slice(0, 300)}${f.content.length > 300 ? '\n... (truncated)' : ''}
  \`\`\`
  `
          )
          .join('\n')
      : '- No config files found'
  }

  ## Script Files
  ${
    projectStructure.scriptFiles.length > 0
      ? projectStructure.scriptFiles
          .map(
            f => `
  ### ${f.filename}
  \`\`\`
  ${f.content.slice(0, 400)}${f.content.length > 400 ? '\n... (truncated)' : ''}
  \`\`\`
  `
          )
          .join('\n')
      : '- No script files found'
  }

  ## Existing Patterns Context
  ${
    existingPatterns
      ? `
  ### Current Patterns (${existingPatterns.length})
  ${existingPatterns
    .map(
      (pattern, index) => `
  #### ${index + 1}. ${pattern}
  `
    )
    .join('')}
  `
      : 'No existing patterns provided.'
  }

  ## Pattern Generation Requirements
  1. **Analyze project-specific content types** that need masking based on actual file content
  2. **Identify sensitive data patterns** in code and documentation shown above
  3. **Generate regex patterns** for consistent content masking
  4. **Create appropriate replacements** that maintain context
  5. **Ensure patterns don't conflict** with existing ones
  6. **Provide clear descriptions** for each pattern

  ## Required Output Format
  Please provide custom pattern configuration in JSON format:

  \`\`\`json
  {
    "patterns": [
      {
        "name": "pattern-name",
        "pattern": "regex-pattern",
        "replacement": "replacement-text",
        "description": "pattern-description",
        "category": "pattern-category"
      }
    ],
    "recommendations": ["list", "of", "recommendations"],
    "conflicts": ["any", "potential", "conflicts"]
  }
  \`\`\`
  `;

      const instructions = `
  # Custom Pattern Configuration Instructions

  This analysis provides **actual project file contents** for comprehensive pattern generation.

  ## Analysis Scope
  - **Project Path**: ${projectPath}
  - **Package Files**: ${projectStructure.packageFiles.length} found
  - **Environment Files**: ${projectStructure.environmentFiles.length} found
  - **Config Files**: ${projectStructure.configFiles.length} found
  - **Script Files**: ${projectStructure.scriptFiles.length} found
  - **Total Files Analyzed**: ${projectStructure.totalFiles}
  - **Existing Patterns**: ${existingPatterns?.length || 0} patterns

  ## Next Steps
  1. **Submit the configuration prompt** to an AI agent for pattern analysis
  2. **Parse the JSON response** to get custom patterns and recommendations
  3. **Review generated patterns** for accuracy and completeness
  4. **Implement patterns** in the content masking system

  ## Expected AI Response Format
  The AI will return a JSON object with:
  - \`patterns\`: Array of custom pattern configurations
  - \`recommendations\`: Best practices and implementation guidance
  - \`conflicts\`: Potential conflicts with existing patterns

  ## Usage Example
  \`\`\`typescript
  const result = await configureCustomPatterns({ projectPath, existingPatterns });
  // Submit result.configurationPrompt to AI agent
  // Parse AI response for custom pattern configuration
  \`\`\`
  `;

      const result = {
        configurationPrompt: customPatternPrompt,
        instructions,
        actualData: {
          projectStructure,
          summary: {
            totalFiles: projectStructure.totalFiles,
            packageFiles: projectStructure.packageFiles.length,
            environmentFiles: projectStructure.environmentFiles.length,
            configFiles: projectStructure.configFiles.length,
            scriptFiles: projectStructure.scriptFiles.length,
          },
        },
      };

      return {
        content: [
          {
            type: 'text',
            text: `# Custom Pattern Configuration\n\n${result.instructions}\n\n## AI Configuration Prompt\n\n${result.configurationPrompt}`,
          },
        ],
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to configure custom patterns: ${error instanceof Error ? error.message : String(error)}`,
        'CONFIGURATION_ERROR'
      );
    }
  }

  /**
   * Apply basic masking (fallback when AI is not available)
   */
  export async function applyBasicContentMasking(args: {
    content: string;
    maskingStrategy?: 'full' | 'partial' | 'placeholder';
  }): Promise<any> {
    const { content, maskingStrategy = 'full' } = args;

    try {
      const { applyBasicMasking, validateMasking } = await import('../utils/content-masking.js');

      if (!content || content.trim().length === 0) {
        throw new McpAdrError('Content is required for masking', 'INVALID_INPUT');
      }

      const maskedContent = applyBasicMasking(content, maskingStrategy);
      const validation = validateMasking(content, maskedContent);

      return {
        content: [
          {
            type: 'text',
            text: `# Basic Content Masking Applied

  ## Masking Strategy
  ${maskingStrategy}

  ## Original Content Length
  ${content.length} characters

  ## Masked Content
  \`\`\`
  ${maskedContent}
  \`\`\`

  ## Validation Results
  - **Security Score**: ${(validation.securityScore * 100).toFixed(1)}%
  - **Is Valid**: ${validation.isValid ? '✅ Yes' : '❌ No'}

  ${
    validation.issues.length > 0
      ? `## Issues Found
  ${validation.issues.map(issue => `- ${issue}`).join('\n')}`
      : '## ✅ No Issues Found'
  }

  ## Recommendations
  - For better security analysis, use AI-powered detection with \`analyze_content_security\`
  - Consider using custom patterns for project-specific sensitive information
  - Review masked content to ensure it maintains necessary functionality
  `,
          },
        ],
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to apply basic masking: ${error instanceof Error ? error.message : String(error)}`,
        'MASKING_ERROR'
      );
    }
  }

  /**
   * Validate that content masking was applied correctly
   */
  export async function validateContentMasking(args: {
    originalContent: string;
    maskedContent: string;
  }): Promise<any> {
    const { originalContent, maskedContent } = args;

    try {
      const { validateMasking } = await import('../utils/content-masking.js');

      if (!originalContent || !maskedContent) {
        throw new McpAdrError('Both original and masked content are required', 'INVALID_INPUT');
      }

      const validation = validateMasking(originalContent, maskedContent);

      return {
        content: [
          {
            type: 'text',
            text: `# Content Masking Validation

  ## Validation Results
  - **Security Score**: ${(validation.securityScore * 100).toFixed(1)}%
  - **Is Valid**: ${validation.isValid ? '✅ Yes' : '❌ No'}

  ## Content Comparison
  - **Original Length**: ${originalContent.length} characters
  - **Masked Length**: ${maskedContent.length} characters
  - **Size Change**: ${((maskedContent.length / originalContent.length - 1) * 100).toFixed(1)}%

  ${
    validation.issues.length > 0
      ? `## ⚠️ Issues Found
  ${validation.issues.map(issue => `- ${issue}`).join('\n')}

  ## Recommendations
  - Review the masking process to address identified issues
  - Consider using more comprehensive AI-powered masking
  - Ensure all sensitive patterns are properly detected and masked`
      : '## ✅ Validation Passed'
  }

  ## Security Assessment
  ${
    validation.securityScore >= 0.9
      ? '🟢 **Excellent**: Content appears to be properly masked'
      : validation.securityScore >= 0.7
        ? '🟡 **Good**: Minor issues detected, review recommended'
        : validation.securityScore >= 0.5
          ? '🟠 **Fair**: Several issues found, masking needs improvement'
          : '🔴 **Poor**: Significant security issues, masking failed'
  }
  `,
          },
        ],
      };
    } catch (error) {
      throw new McpAdrError(
        `Failed to validate masking: ${error instanceof Error ? error.message : String(error)}`,
        'VALIDATION_ERROR'
      );
    }
  }

  /**
   * Helper methods for SecurityMemoryManager
   */
  function calculateOverallRisk(detectedPatterns: any[]): string {
    // ...
  ```
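As a usage sketch, the tool-level helpers above can be called directly; both return MCP-style responses whose `content` array holds a markdown report. The sample inputs are illustrative:

```typescript
async function example(): Promise<void> {
  // Apply the regex fallback at the tool layer and get a formatted report.
  const maskedResult = await applyBasicContentMasking({
    content: 'token=ghp_abcdefghijklmnopqrstuvwxyz0123456789',
    maskingStrategy: 'placeholder', // tokens become '[GITHUB_TOKEN_REDACTED]'
  });
  console.log(maskedResult.content[0].text); // "# Basic Content Masking Applied ..."

  // Independently verify a masking pass after the fact.
  const report = await validateContentMasking({
    originalContent: 'password: s3cret',
    maskedContent: 'password=[PASSWORD_REDACTED]',
  });
  console.log(report.content[0].text); // "# Content Masking Validation ..."
}
```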