generate_project

Create project specifications and generate file structures from user prompts. Automatically produces MCP file proposals for documentation and project setup.

Instructions

Generate a project spec and return MCP create_file/edit_file proposals. Set allowWrite=true to write files.
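
For example, a client might invoke the tool with a standard MCP tools/call request (the prompt value here is illustrative):

    {
      "jsonrpc": "2.0",
      "id": 1,
      "method": "tools/call",
      "params": {
        "name": "generate_project",
        "arguments": {
          "prompt": "A Node.js CLI todo app with docs and specs",
          "allowWrite": false
        }
      }
    }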

Input Schema

Name        Required  Description                                     Default
prompt      Yes       User prompt describing the project to generate  —
allowWrite  No        Set to true to write generated files to disk    false
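
A JSON Schema equivalent of this table, inferred from the parameter names and the handler's allowWrite = false default, might look like:

    {
      "type": "object",
      "properties": {
        "prompt": { "type": "string" },
        "allowWrite": { "type": "boolean", "default": false }
      },
      "required": ["prompt"]
    }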

Implementation Reference

  • The handler for the generate_project tool. It generates the file structure and file contents via LLM calls, saves run artifacts when enabled, and either returns create_file proposals (dry run) or materializes the files to disk. A usage sketch follows the code.
    async function generateProject(userPrompt, { allowWrite = false, saveRun = true } = {}) {
      const ts = new Date().toISOString().replace(/[:.]/g, '-');
      const runDir = path.resolve(process.cwd(), '.runs');
      if (saveRun && !fs.existsSync(runDir)) fs.mkdirSync(runDir, { recursive: true });
    
      const structure = await generateStructure(userPrompt, ts);
      const filesMap = await generateContents(userPrompt, structure, ts);
    
      const proposals = proposeCreatesFromFiles(filesMap);
    
      if (saveRun) {
        const artifact = { timestamp: ts, params: { model: config.model }, structure, files: Object.keys(filesMap) };
        fs.writeFileSync(path.join(runDir, `${ts}.json`), JSON.stringify(artifact, null, 2));
      }
    
      if (!allowWrite) return proposals;
      return materialize(filesMap, { allowWrite: true });
    }
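    A minimal usage sketch (illustrative prompt; assumes an async caller):
      // Dry run: returns MCP create_file proposals without writing anything.
      const proposals = await generateProject('A REST API for a bookstore');
      // Write mode: materializes the generated files to disk.
      const written = await generateProject('A REST API for a bookstore', { allowWrite: true });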
  • Exports generateProject (alongside generateStructure and generateContents) from the orchestrator module so it can be imported elsewhere, e.g., by the CLI.
    module.exports = { generateStructure, generateContents, generateProject };
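    A consumer such as the CLI could then import it (the relative path below is an assumption):
      const { generateProject } = require('./orchestrator');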
  • Helper function to generate the project file/folder structure using an LLM call and validate it against a schema.
    async function generateStructure(userPrompt, runId) {
      const systemPrompt = fs.readFileSync(config.systemPromptPath, 'utf8');
      const llmCall = config.provider === 'openai' ? callOpenAI : callAnthropic;
      const text = await llmCall({
        systemPrompt,
        userPrompt: buildStructureUserPrompt(userPrompt),
        model: config.model,
        temperature: config.temperature,
        timeoutMs: config.timeoutMs,
        maxRetries: config.maxRetries,
        baseDelayMs: config.baseDelayMs
      });
      if (typeof text !== 'string') {
        throw new Error('Model response was not text');
      }
      writeRunText(runId, 'structure.raw', text);
      const json = extractJson(text);
      // Normalize: keep only file paths and ensure empty content for structure step
      const files = json && json.files && typeof json.files === 'object' ? Object.keys(json.files) : [];
      const normalized = {
        folders: json && Array.isArray(json.folders) ? json.folders : ['specs', 'docs'],
        files: files.reduce((acc, p) => { acc[p] = ""; return acc; }, {})
      };
      const ok = validateStructure(normalized);
      if (!ok) throw new Error('Structure JSON failed validation: ' + ajv.errorsText(validateStructure.errors));
      return normalized;
    }
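    Given the normalization above, a validated structure object looks like this (file paths illustrative; contents are intentionally emptied at this step):
      {
        "folders": ["specs", "docs"],
        "files": { "specs/spec.md": "", "docs/architecture.md": "" }
      }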
  • Helper that generates file contents in batches of three paths via LLM calls and validates each batch against a schema.
    async function generateContents(userPrompt, structure, runId) {
      const systemPrompt = fs.readFileSync(config.systemPromptPath, 'utf8');
      const allPaths = Object.keys(structure.files || {});
      const batches = chunkArray(allPaths, 3);
      const merged = {};
      for (let i = 0; i < batches.length; i++) {
        const prompt = buildContentBatchPrompt(userPrompt, batches[i]);
        const llmCall2 = config.provider === 'openai' ? callOpenAI : callAnthropic;
        const text = await llmCall2({
          systemPrompt,
          userPrompt: prompt,
          model: config.model,
          temperature: config.temperature,
          timeoutMs: config.timeoutMs,
          maxRetries: config.maxRetries,
          baseDelayMs: config.baseDelayMs
        });
        if (typeof text !== 'string') {
          throw new Error('Model response was not text');
        }
        writeRunText(runId, `files.batch-${i + 1}.raw`, text);
        const json = extractJson(text);
        const ok = validateFiles(json);
        if (!ok) throw new Error('Files JSON failed validation: ' + ajv.errorsText(validateFiles.errors));
        Object.assign(merged, json.files);
      }
      return merged;
    }
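    The chunkArray helper is not shown in this reference; a minimal sketch consistent with its use above (slices of at most three paths) would be:
      // Splits an array into consecutive slices of at most `size` elements.
      function chunkArray(arr, size) {
        const out = [];
        for (let i = 0; i < arr.length; i += size) {
          out.push(arr.slice(i, i + size));
        }
        return out;
      }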
  • Helper to materialize (write) the generated files to disk or return proposals.
    async function materialize(filesMap, { allowWrite = false } = {}) {
      const results = [];
      for (const [filePath, content] of Object.entries(filesMap)) {
        results.push(createFile(filePath, content, { allowWrite }));
      }
      return results;
    }
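    The createFile and proposeCreatesFromFiles helpers are likewise not shown. Assuming each proposal mirrors an MCP create_file call, a hypothetical reconstruction of proposeCreatesFromFiles might be:
      // Hypothetical: maps each generated file to a create_file proposal.
      function proposeCreatesFromFiles(filesMap) {
        return Object.entries(filesMap).map(([filePath, content]) => ({
          tool: 'create_file',
          arguments: { path: filePath, content }
        }));
      }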

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Huxley-Brown/Project-Setup-MCP'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.