models
Manage the AI model configuration used for task generation and research operations in Task Master. Set the main, research, and fallback models, or list the available models along with their cost details.
Instructions
Get information about available AI models or set model configurations. Run without arguments to get the current model configuration and API key status for the selected model providers.
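For orientation, here are illustrative argument payloads for the three most common invocations. The parameter names come from the schema below; the project path and model ID are made-up examples, not defaults.

```javascript
// Illustrative payloads only - parameter names follow the input schema below;
// the project path and model ID are invented examples.

// 1. Get the current model configuration and API key status.
const getConfigArgs = { projectRoot: '/absolute/path/to/project' };

// 2. List all available models not currently in use.
const listModelsArgs = {
	projectRoot: '/absolute/path/to/project',
	listAvailableModels: true
};

// 3. Set the primary model used for task generation/updates.
const setMainArgs = {
	projectRoot: '/absolute/path/to/project',
	setMain: 'provider/model-id' // hypothetical model ID
};
```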
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| azure | No | Indicates the set model ID is a custom Azure OpenAI model. | |
| baseURL | No | Custom base URL for providers that support it (e.g., https://api.example.com/v1). | |
| bedrock | No | Indicates the set model ID is a custom AWS Bedrock model. | |
| listAvailableModels | No | List all available models not currently in use. Input/output cost values are in dollars (3 is $3.00). | |
| ollama | No | Indicates the set model ID is a custom Ollama model. | |
| openai-compatible | No | Indicates the set model ID is a custom OpenAI-compatible model. Requires the baseURL parameter. | |
| openrouter | No | Indicates the set model ID is a custom OpenRouter model. | |
| projectRoot | Yes | The directory of the project. Must be an absolute path. | |
| setFallback | No | Set the model to use if the primary fails. Model provider API key is required in the MCP config ENV. | |
| setMain | No | Set the primary model for task generation/updates. Model provider API key is required in the MCP config ENV. | |
| setResearch | No | Set the model for research-backed operations. Model provider API key is required in the MCP config ENV. | |
| vertex | No | Indicates the set model ID is a custom Google Vertex AI model. | |
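The custom provider flags (openrouter, ollama, bedrock, azure, vertex, openai-compatible) are mutually exclusive; the handler rejects requests that set more than one, and openai-compatible additionally requires baseURL. A hedged sketch of such payloads, with invented model IDs and URL:

```javascript
// Sketch only - model IDs and the base URL are invented examples.

// Custom Ollama model for the research role.
const setResearchOllamaArgs = {
	projectRoot: '/absolute/path/to/project',
	setResearch: 'llama3:latest', // hypothetical local model tag
	ollama: true
};

// OpenAI-compatible provider for the fallback role; baseURL is required here.
const setFallbackCompatibleArgs = {
	projectRoot: '/absolute/path/to/project',
	setFallback: 'my-custom-model', // hypothetical model ID
	'openai-compatible': true,
	baseURL: 'https://api.example.com/v1'
};
```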
Implementation Reference
- Primary handler function `modelsDirect` implementing the core logic: model listing, setting the model for a role (main/research/fallback), retrieving the current configuration, and validating the custom provider flags. A usage sketch follows this reference list.

```javascript
export async function modelsDirect(args, log, context = {}) {
	const { session } = context;
	const { projectRoot } = args; // Extract projectRoot from args

	// Create a logger wrapper that the core functions can use
	const mcpLog = createLogWrapper(log);

	log.info(`Executing models_direct with args: ${JSON.stringify(args)}`);
	log.info(`Using project root: ${projectRoot}`);

	// Validate flags: only one custom provider flag can be used simultaneously
	const customProviderFlags = CUSTOM_PROVIDERS_ARRAY.filter(
		(provider) => args[provider]
	);

	if (customProviderFlags.length > 1) {
		log.error(
			'Error: Cannot use multiple custom provider flags simultaneously.'
		);
		return {
			success: false,
			error: {
				code: 'INVALID_ARGS',
				message:
					'Cannot use multiple custom provider flags simultaneously. Choose only one: openrouter, ollama, bedrock, azure, vertex, or openai-compatible.'
			}
		};
	}

	try {
		enableSilentMode();
		try {
			// Check for the listAvailableModels flag
			if (args.listAvailableModels === true) {
				return await getAvailableModelsList({ session, mcpLog, projectRoot });
			}

			// Handle setting any model role using unified function
			const modelContext = { session, mcpLog, projectRoot };
			const modelSetResult = await handleModelSetting(args, modelContext);
			if (modelSetResult) {
				return modelSetResult;
			}

			// Default action: get current configuration
			return await getModelConfiguration({ session, mcpLog, projectRoot });
		} finally {
			disableSilentMode();
		}
	} catch (error) {
		log.error(`Error in models_direct: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'DIRECT_FUNCTION_ERROR',
				message: error.message,
				details: error.stack
			}
		};
	}
}
```
- mcp-server/src/tools/models.js:19-81 (schema): Zod input schema for the 'models' tool parameters, defining options for setting the main/research/fallback models, listing available models, and the custom provider flags.

```javascript
parameters: z.object({
	setMain: z
		.string()
		.optional()
		.describe(
			'Set the primary model for task generation/updates. Model provider API key is required in the MCP config ENV.'
		),
	setResearch: z
		.string()
		.optional()
		.describe(
			'Set the model for research-backed operations. Model provider API key is required in the MCP config ENV.'
		),
	setFallback: z
		.string()
		.optional()
		.describe(
			'Set the model to use if the primary fails. Model provider API key is required in the MCP config ENV.'
		),
	listAvailableModels: z
		.boolean()
		.optional()
		.describe(
			'List all available models not currently in use. Input/output costs values are in dollars (3 is $3.00).'
		),
	projectRoot: z
		.string()
		.describe('The directory of the project. Must be an absolute path.'),
	openrouter: z
		.boolean()
		.optional()
		.describe('Indicates the set model ID is a custom OpenRouter model.'),
	ollama: z
		.boolean()
		.optional()
		.describe('Indicates the set model ID is a custom Ollama model.'),
	bedrock: z
		.boolean()
		.optional()
		.describe('Indicates the set model ID is a custom AWS Bedrock model.'),
	azure: z
		.boolean()
		.optional()
		.describe('Indicates the set model ID is a custom Azure OpenAI model.'),
	vertex: z
		.boolean()
		.optional()
		.describe(
			'Indicates the set model ID is a custom Google Vertex AI model.'
		),
	'openai-compatible': z
		.boolean()
		.optional()
		.describe(
			'Indicates the set model ID is a custom OpenAI-compatible model. Requires baseURL parameter.'
		),
	baseURL: z
		.string()
		.optional()
		.describe(
			'Custom base URL for providers that support it (e.g., https://api.example.com/v1).'
		)
}),
```
- mcp-server/src/tools/models.js:14-107 (registration): `registerModelsTool` adds the 'models' MCP tool to the server with its name, description, and parameter schema, plus a thin `execute` wrapper that calls `modelsDirect`.

```javascript
export function registerModelsTool(server) {
	server.addTool({
		name: 'models',
		description:
			'Get information about available AI models or set model configurations. Run without arguments to get the current model configuration and API key status for the selected model providers.',
		// Parameter schema elided here for brevity - it is identical to the
		// z.object({ ... }) shown in the previous snippet (models.js:19-81).
		parameters: z.object({
			/* ...see schema snippet above... */
		}),
		execute: withToolContext('models', async (args, context) => {
			try {
				context.log.info(
					`Starting models tool with args: ${JSON.stringify(args)}`
				);

				// Use args.projectRoot directly (normalized by withToolContext)
				const result = await modelsDirect(
					{ ...args, projectRoot: args.projectRoot },
					context.log,
					{ session: context.session }
				);

				return handleApiResult({
					result,
					log: context.log,
					errorPrefix: 'Error managing models',
					projectRoot: args.projectRoot
				});
			} catch (error) {
				context.log.error(`Error in models tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		})
	});
}
```
- mcp-server/src/tools/tool-registry.js:59-104 (registration): central tool registry object mapping the tool name 'models' to `registerModelsTool` for dynamic server registration. See the registration-loop sketch after this list.

```javascript
export const toolRegistry = {
	initialize_project: registerInitializeProjectTool,
	models: registerModelsTool,
	rules: registerRulesTool,
	parse_prd: registerParsePRDTool,
	'response-language': registerResponseLanguageTool,
	analyze_project_complexity: registerAnalyzeProjectComplexityTool,
	expand_task: registerExpandTaskTool,
	expand_all: registerExpandAllTool,
	scope_up_task: registerScopeUpTool,
	scope_down_task: registerScopeDownTool,
	get_tasks: registerGetTasksTool,
	get_task: registerGetTaskTool,
	next_task: registerNextTaskTool,
	complexity_report: registerComplexityReportTool,
	set_task_status: registerSetTaskStatusTool,
	add_task: registerAddTaskTool,
	add_subtask: registerAddSubtaskTool,
	update: registerUpdateTool,
	update_task: registerUpdateTaskTool,
	update_subtask: registerUpdateSubtaskTool,
	remove_task: registerRemoveTaskTool,
	remove_subtask: registerRemoveSubtaskTool,
	clear_subtasks: registerClearSubtasksTool,
	move_task: registerMoveTaskTool,
	add_dependency: registerAddDependencyTool,
	remove_dependency: registerRemoveDependencyTool,
	validate_dependencies: registerValidateDependenciesTool,
	fix_dependencies: registerFixDependenciesTool,
	list_tags: registerListTagsTool,
	add_tag: registerAddTagTool,
	delete_tag: registerDeleteTagTool,
	use_tag: registerUseTagTool,
	rename_tag: registerRenameTagTool,
	copy_tag: registerCopyTagTool,
	research: registerResearchTool,
	autopilot_start: registerAutopilotStartTool,
	autopilot_resume: registerAutopilotResumeTool,
	autopilot_next: registerAutopilotNextTool,
	autopilot_status: registerAutopilotStatusTool,
	autopilot_complete: registerAutopilotCompleteTool,
	autopilot_commit: registerAutopilotCommitTool,
	autopilot_finalize: registerAutopilotFinalizeTool,
	autopilot_abort: registerAutopilotAbortTool,
	generate: registerGenerateTool
};
```
- Helper `getModelConfiguration`, used by the handler to fetch and format the current model configuration, API key statuses, and model details for the main/research/fallback roles. The sketch after this list shows how the returned envelope can be read.

```javascript
async function getModelConfiguration(options = {}) {
	const { mcpLog, projectRoot, session } = options;

	const report = (level, ...args) => {
		if (mcpLog && typeof mcpLog[level] === 'function') {
			mcpLog[level](...args);
		}
	};

	if (!projectRoot) {
		throw new Error('Project root is required but not found.');
	}

	// Use centralized config path finding instead of hardcoded path
	const configPath = findConfigPath(null, { projectRoot });
	const configExists = isConfigFilePresent(projectRoot);

	log(
		'debug',
		`Checking for config file using findConfigPath, found: ${configPath}`
	);
	log(
		'debug',
		`Checking config file using isConfigFilePresent(), exists: ${configExists}`
	);

	if (!configExists) {
		throw new Error(CONFIG_MISSING_ERROR);
	}

	try {
		// Get current settings - these should use the config from the found path automatically
		const mainProvider = getMainProvider(projectRoot);
		const mainModelId = getMainModelId(projectRoot);
		const mainBaseURL = getBaseUrlForRole('main', projectRoot);
		const researchProvider = getResearchProvider(projectRoot);
		const researchModelId = getResearchModelId(projectRoot);
		const researchBaseURL = getBaseUrlForRole('research', projectRoot);
		const fallbackProvider = getFallbackProvider(projectRoot);
		const fallbackModelId = getFallbackModelId(projectRoot);
		const fallbackBaseURL = getBaseUrlForRole('fallback', projectRoot);

		// Check API keys
		const mainCliKeyOk = isApiKeySet(mainProvider, session, projectRoot);
		const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot);
		const researchCliKeyOk = isApiKeySet(
			researchProvider,
			session,
			projectRoot
		);
		const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot);
		const fallbackCliKeyOk = fallbackProvider
			? isApiKeySet(fallbackProvider, session, projectRoot)
			: true;
		const fallbackMcpKeyOk = fallbackProvider
			? getMcpApiKeyStatus(fallbackProvider, projectRoot)
			: true;

		// Get available models to find detailed info
		const availableModels = getAvailableModels(projectRoot);

		// Find model details
		const mainModelData = availableModels.find((m) => m.id === mainModelId);
		const researchModelData = availableModels.find(
			(m) => m.id === researchModelId
		);
		const fallbackModelData = fallbackModelId
			? availableModels.find((m) => m.id === fallbackModelId)
			: null;

		// Return structured configuration data
		return {
			success: true,
			data: {
				activeModels: {
					main: {
						provider: mainProvider,
						modelId: mainModelId,
						baseURL: mainBaseURL,
						sweScore: mainModelData?.swe_score || null,
						cost: mainModelData?.cost_per_1m_tokens || null,
						keyStatus: {
							cli: mainCliKeyOk,
							mcp: mainMcpKeyOk
						}
					},
					research: {
						provider: researchProvider,
						modelId: researchModelId,
						baseURL: researchBaseURL,
						sweScore: researchModelData?.swe_score || null,
						cost: researchModelData?.cost_per_1m_tokens || null,
						keyStatus: {
							cli: researchCliKeyOk,
							mcp: researchMcpKeyOk
						}
					},
					fallback: fallbackProvider
						? {
								provider: fallbackProvider,
								modelId: fallbackModelId,
								baseURL: fallbackBaseURL,
								sweScore: fallbackModelData?.swe_score || null,
								cost: fallbackModelData?.cost_per_1m_tokens || null,
								keyStatus: {
									cli: fallbackCliKeyOk,
									mcp: fallbackMcpKeyOk
								}
							}
						: null
				},
				message: 'Successfully retrieved current model configuration'
			}
		};
	} catch (error) {
		report('error', `Error getting model configuration: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CONFIG_ERROR',
				message: error.message
			}
		};
	}
}
```
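As a usage sketch for the pieces above: a caller passes the tool arguments, a logger with info/error methods, and an optional session in the context object, then reads the `{ success, data }` or `{ success: false, error }` envelope that `modelsDirect` returns. The import path, the exact logger shape, and the sample values are assumptions, not taken from this reference.

```javascript
// Minimal sketch - the import path and logger shape are assumed, not confirmed.
import { modelsDirect } from './core/direct-functions/models.js'; // path assumed

const log = {
	info: (msg) => console.log(msg),
	warn: (msg) => console.warn(msg),
	error: (msg) => console.error(msg),
	debug: (msg) => console.debug(msg)
};

// Default path: no set*/listAvailableModels flags, so the current
// configuration is retrieved via getModelConfiguration.
const result = await modelsDirect(
	{ projectRoot: '/absolute/path/to/project' },
	log,
	{ session: undefined } // session normally comes from the MCP server context
);

if (result.success) {
	const { main, research, fallback } = result.data.activeModels;
	console.log(`main: ${main.provider} / ${main.modelId} (CLI key ok: ${main.keyStatus.cli})`);
	console.log(`research: ${research.provider} / ${research.modelId}`);
	if (fallback) {
		console.log(`fallback: ${fallback.provider} / ${fallback.modelId}`);
	}
} else {
	console.error(`${result.error.code}: ${result.error.message}`);
}
```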
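How the server consumes `toolRegistry` is not shown in this reference; a plausible registration loop would look like the following, so treat it as an assumption rather than the actual wiring.

```javascript
// Hypothetical registration loop - the actual MCP server wiring may differ.
for (const registerTool of Object.values(toolRegistry)) {
	registerTool(server); // each register* function calls server.addTool(...)
}
```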