upsert-prompt
Create or update prompts with templates and model configurations for consistent AI-driven outputs. Manage prompts efficiently for tasks like email generation or content creation.
Instructions
Create or update a prompt with its template and configuration. Creates a new prompt and its initial version with specified model settings.
Example usage: Create a new prompt named 'email_generator' with a template for generating emails
Expected return: A confirmation message of successful prompt creation
Input Schema
JSON Schema (table view)
| Name | Required | Description | Default |
|---|---|---|---|
| description | No | Optional description of the prompt | |
| model_name | No | Model identifier passed to the provider | gpt-4 |
| model_provider | No | One of OPENAI, AZURE_OPENAI, ANTHROPIC, or GOOGLE | OPENAI |
| name | Yes | Prompt name; lowercased, spaces replaced with underscores, and non-alphanumeric characters removed | |
| temperature | No | Sampling temperature for the model invocation | 0.7 |
| template | Yes | Prompt template text, sent as the user message content | |
Implementation Reference
- js/packages/phoenix-mcp/src/promptTools.ts:350-453 (registration)Registration of the "upsert-prompt" MCP tool, including description, schema reference, and inline handler function that prepares a prompt version based on the model provider and creates the prompt using the Phoenix client.server.tool( "upsert-prompt", UPSERT_PROMPT_DESCRIPTION, createPromptSchema.shape, async ({ name, description, template, model_provider, model_name, temperature, }) => { let promptVersionData; switch (model_provider) { case "OPENAI": promptVersionData = promptVersion({ modelProvider: "OPENAI", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, }, }); break; case "AZURE_OPENAI": promptVersionData = promptVersion({ modelProvider: "AZURE_OPENAI", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, }, }); break; case "ANTHROPIC": promptVersionData = promptVersion({ modelProvider: "ANTHROPIC", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, max_tokens: 1000, // Required for Anthropic }, }); break; case "GOOGLE": promptVersionData = promptVersion({ modelProvider: "GOOGLE", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, }, }); break; } const response = await createPrompt({ client: client, name: name, description: description || "", version: promptVersionData, }); return { content: [ { type: "text", text: `Successfully created prompt "${name}":\n${JSON.stringify(response, null, 2)}`, }, ], }; } ); server.tool( "list-prompt-versions", 
LIST_PROMPT_VERSIONS_DESCRIPTION, listPromptVersionsSchema.shape, async ({ prompt_identifier, limit }) => {
- Inline handler for upsert-prompt tool: constructs a promptVersion object tailored to the specified model provider and invokes createPrompt via the Phoenix client to upsert the prompt.async ({ name, description, template, model_provider, model_name, temperature, }) => { let promptVersionData; switch (model_provider) { case "OPENAI": promptVersionData = promptVersion({ modelProvider: "OPENAI", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, }, }); break; case "AZURE_OPENAI": promptVersionData = promptVersion({ modelProvider: "AZURE_OPENAI", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, }, }); break; case "ANTHROPIC": promptVersionData = promptVersion({ modelProvider: "ANTHROPIC", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, max_tokens: 1000, // Required for Anthropic }, }); break; case "GOOGLE": promptVersionData = promptVersion({ modelProvider: "GOOGLE", modelName: model_name, description: description || "", template: [ { role: "user", content: [{ type: "text", text: template }], }, ], invocationParameters: { temperature: temperature, }, }); break; } const response = await createPrompt({ client: client, name: name, description: description || "", version: promptVersionData, }); return { content: [ { type: "text", text: `Successfully created prompt "${name}":\n${JSON.stringify(response, null, 2)}`, }, ], }; } ); server.tool( "list-prompt-versions", LIST_PROMPT_VERSIONS_DESCRIPTION, listPromptVersionsSchema.shape,
- Zod input schema for the upsert-prompt tool (createPromptSchema), defining parameters like name, description, template, model_provider, model_name, and temperature with transformations and defaults.export const createPromptSchema = z.object({ name: z .string() .transform( (val) => val .toLowerCase() .replace(/\s+/g, "_") // Replace spaces with underscores .replace(/[^\w_]/g, "") // Remove anything that's not alphanumeric or underscore ) .refine((val) => val.length > 0, { message: "Name cannot be empty after transformation", }), description: z.string().optional(), template: z.string(), model_provider: z .enum(["OPENAI", "AZURE_OPENAI", "ANTHROPIC", "GOOGLE"]) .optional() .default("OPENAI"), model_name: z.string().optional().default("gpt-4"), temperature: z.number().optional().default(0.7), });