Skip to main content
Glama
generatePrompt.ts3.42 kB
/** * Generate Prompt Tool * Transforms user ideas into well-structured prompts * * Uses PromptArchitect backend API with fallback to local template generation. */ import { z } from 'zod'; import { logger, apiGeneratePrompt, isApiClientAvailable, } from '../utils/index.js'; export const generatePromptSchema = z.object({ idea: z.string().min(1).describe('The user\'s raw prompt idea or concept'), template: z.enum(['coding', 'writing', 'research', 'analysis', 'factcheck', 'general']) .optional() .default('general') .describe('Template type to use for generation'), context: z.string().optional().describe('Additional context or constraints'), targetModel: z.enum(['gpt-4', 'claude', 'gemini', 'general']) .optional() .default('general') .describe('Target AI model to optimize for'), workspaceContext: z.string().optional().describe('Project context including file structure, tech stack, dependencies, and any relevant code snippets to ensure the generated prompt aligns with the project scope'), }); export type GeneratePromptInput = z.infer<typeof generatePromptSchema>; export async function generatePrompt(input: GeneratePromptInput): Promise<{ prompt: string; template: string; metadata: { estimatedTokens: number; wordCount: number; hasStructure: boolean; }; }> { const { idea, template = 'general', context, targetModel, workspaceContext } = input; logger.info('Generating prompt', { template, targetModel, ideaLength: idea.length, hasWorkspaceContext: !!workspaceContext }); let generatedPrompt: string = ''; // Use PromptArchitect API if (isApiClientAvailable()) { try { const response = await apiGeneratePrompt({ idea, template, context, targetModel, workspaceContext, }); generatedPrompt = response.prompt; logger.info('Generated via PromptArchitect API'); } catch (error) { logger.warn('API request failed, using fallback', { error: error instanceof Error ? 
error.message : 'Unknown error' }); } } // Fallback template generation if (!generatedPrompt) { generatedPrompt = createFallbackPrompt(idea, template, context); logger.warn('Using fallback prompt generation'); } // Calculate metadata const wordCount = generatedPrompt.split(/\s+/).length; const estimatedTokens = Math.ceil(wordCount * 1.3); // Rough estimate const hasStructure = /^#+\s|^\d+\.|^-\s|^\*\s/m.test(generatedPrompt); return { prompt: generatedPrompt, template, metadata: { estimatedTokens, wordCount, hasStructure, }, }; } function createFallbackPrompt(idea: string, template: string, context?: string): string { const sections = [ '## Objective', idea, '', '## Context', context || 'No additional context provided.', '', '## Requirements', '- Provide a clear, detailed response', '- Use structured formatting where appropriate', '- Include relevant examples if helpful', '', '## Output Format', 'Respond with a well-organized answer that directly addresses the objective.', ]; if (template === 'coding') { sections.push('', '## Technical Notes', '- Include code examples where applicable', '- Consider edge cases', '- Follow best practices'); } return sections.join('\n'); } export default generatePrompt;

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP directory API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/MerabyLabs/promptarchitect-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.