/**
* MCP Tools definitions for NDB operations
*/
import { zodToJsonSchema } from 'zod-to-json-schema';
import { ProvisionDatabaseRequestSchema, RestoreDatabaseRequestSchema, UpdateTimeMachineRequestSchema, CreateCloneRequestSchema, CreateDbserverRequestSchema } from './schemas.js';
// Generate the full schema
const fullCreateDbserverInputSchema = zodToJsonSchema(CreateDbserverRequestSchema);
// Clone and override required fields for tool input
const createDbserverInputSchema = {
...fullCreateDbserverInputSchema,
required: ['name']
};
// Generate the full schema
const fullProvisionDatabaseInputSchema = zodToJsonSchema(ProvisionDatabaseRequestSchema);
// Clone and override required fields for tool input
const provisionDatabaseInputSchema = {
...fullProvisionDatabaseInputSchema,
required: ['databaseType', 'name']
};
// Generate the full schema
const fullCreateCloneInputSchema = zodToJsonSchema(CreateCloneRequestSchema);
// Clone and override required fields for tool input
const createCloneInputSchema = {
...fullCreateCloneInputSchema,
required: ['timeMachineId', 'name']
};
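// The spread-and-override pattern above could be factored into a small helper.
// Sketch only (hypothetical, not wired into the tool definitions below):
// const withRequired = (schema, required) => ({ ...zodToJsonSchema(schema), required });
// e.g. const cloneInput = withRequired(CreateCloneRequestSchema, ['timeMachineId', 'name']);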
export const tools = [
////////////////////////
// Database Management
////////////////////////
{
name: 'list_databases',
description: `Get all registered and provisioned database instances with comprehensive filtering options. Returns a list of database objects with summarized information including status, type, ownership, etc.
The detailed information about each database instance (properties) is not included by default to optimize performance, but can be included by setting the detailed parameter to true.
The detailed information can be useful in some cases, for instance if you want to get the database version, the VM IP address, the listener port, etc.
If the information you need is not available with the detailed option, use the get_database tool to get detailed information about a specific database instance.
**Best Practices:**
- Always use specific filters to get exactly what you need rather than broad queries
- Use properties filters (requires detailed=true) for version-specific searches
- Combine multiple filters to narrow results efficiently
**Available Filters (valueType/value pairs):**
- **name**: Database instance name in NDB (supports partial matching with *pattern*)
- **description**: Database instance description text
- **ownerId**: User ID who owns the database instance (use list_users to resolve usernames)
- **dateCreated/dateModified**: Date filters (use >YYYY-MM-DD, <YYYY-MM-DD for comparisons)
- **clustered**: true/false - whether database instance is clustered
- **eraCreated**: true/false - NDB-provisioned (greenfield) vs registered (brownfield)
- **databaseName**: Database instance name on the VM (may differ from instance name)
- **type**: Database engine type (postgres_database, oracle_database, sqlserver_database, mariadb_database, mysql_database, saphana_database, mongodb_database)
- **status**: Operational status (READY=operational, ERA_DAEMON_UNREACHABLE=agent issues, PROVISIONING=in progress, FAILED=error state)
- **dbserverLogicalClusterId**: Logical cluster hosting the database
- **timeMachineId**: Associated time machine for backups
- **properties.version**: Database engine version. Examples:
- PostgreSQL: 11, 12, 13, 14, 15, 16 (use >=15.5, >14, etc.)
- Oracle: 12c, 19c, 21c (use >12c, <19c, etc.)
- SQL Server: 2016, 2017, 2019, 2022 (use >=2019, etc.)
Properties are available only if detailed=true.
**Advanced Filtering:**
- Operators: !value (not), >value/<value (comparison), *value* (contains)
- Multiple filters: combine with comma-separated valueType/value pairs
- Example: Find PostgreSQL production databases: valueType="type,name", value="postgres_database,*prod*"
**Common Use Cases with Examples:**
- Find PostgreSQL 15.5+ for development: valueType="type,properties.version", value="postgres_database,>=15.5", detailed=true
- Find production databases by naming pattern: valueType="type,name", value="postgres_database,*prod*"
- Find problematic databases: valueType="status", value="!READY"
- Find databases by specific owner: valueType="ownerId", value="12345"
- Find clustered Oracle databases: valueType="type,clustered", value="oracle_database,true"
**Performance & Efficiency:** Always use specific filters to reduce result set size. Use detailed=true only when you need properties information for filtering or display.
**Use Cases:**
- Find databases by engine type, version or status
- Locate databases owned by specific users
- Search for databases with naming patterns
- Identify problematic databases (non-READY status)
- Get overview of database infrastructure
**Hint:** For databases on specific clusters, use list_dbservers with loadDatabases=true instead.
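**Example** (illustrative filter values, adapt to your environment):
{
"valueType": "type,name",
"value": "postgres_database,*prod*",
"detailed": false
}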
`,
inputSchema: {
type: 'object',
properties: {
detailed: {
type: 'boolean',
description: 'Load databases with the details contained in properties, such as DB version, VM IP address, listener port, etc.',
default: false
},
valueType: {
type: 'string',
description: 'Comma-separated list of attribute names to filter on (e.g. "type" for engine type, "type,status" for multiple filters, "name" for database name search)',
},
value: {
type: 'string',
description: 'Comma-separated list of values corresponding to valueType. Use operators: !value (not), >value/<value (comparison), *value* (contains). Examples: "postgres_database", "READY", "*prod*", "!READY"',
},
},
required: []
}
},
{
name: 'get_database',
description: `Get detailed (not filtered) information for a specific database. Returns comprehensive database details including configuration, status, cluster information, and associated resources.
**When to Use:**
- Get detailed information about a single, specific database
- Access database-specific properties not available in list_databases
- Retrieve complete infrastructure and configuration details
- DO NOT use for multiple databases - use list_databases with filters instead
**Unique Information Available (not in list_databases):**
- Complete database configuration and engine-specific properties
- Detailed infrastructure information in databaseNodes array
- Associated dbservers collection with full details
- Linked databases information with creation details
- Complete time machine details (when detailed=true)
- Resource utilization and extended metadata
**Database Properties (engine-specific):**
- Oracle: PDB/CDB configuration, tablespace details, archive settings
- PostgreSQL: extensions, shared_preload_libraries, custom parameters
- SQL Server: availability groups, backup compression, collation settings
- All engines: version specifics, port configurations, data paths
**Infrastructure Details:**
- databaseNodes array: complete VM and cluster information
- linkedDatabases array: shows user vs system-created databases (info.created_by)
- dbservers collection: hosting infrastructure details
**Search Options with Examples:**
- By ID (most precise): databaseId="a1b2c3d4-...", valueType="id"
- By NDB name: databaseId="MyAppDB", valueType="name"
- By internal DB name: databaseId="myapp_prod", valueType="database-name"
**Options:**
- detailed: Set to true to load complete time machine details and clone counts
- loadDbserverCluster: Set to true to include hosting cluster information
**Performance Note:**
This tool is optimized for single database lookups. For multiple databases, use list_databases with appropriate filters, or list_dbservers with loadDatabases=true for cluster-specific queries.
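**Example** (illustrative identifier, based on the search options above):
{
"databaseId": "MyAppDB",
"valueType": "name",
"detailed": true
}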
`,
inputSchema: {
type: 'object',
properties: {
databaseId: {
type: 'string',
description: 'Database identifier - can be NDB database ID, instance name, or internal database name'
},
valueType: {
type: 'string',
description: 'Type of identifier provided',
enum: ['id', 'name', 'database-name'],
default: 'id'
},
detailed: {
type: 'boolean',
description: 'Load complete entity details including extended metadata and properties',
default: false
},
loadDbserverCluster: {
type: 'boolean',
description: 'Include cluster information for the associated dbserver',
default: false
}
},
required: ['databaseId']
}
},
{
name: 'provision_database',
description: `🚨 **START HERE FOR ALL DATABASE PROVISIONING** 🚨
Provision a new database instance using NDB with intelligent parameter validation and guided configuration. Creates a database with associated time machine for backups and recovery, and optionally the underlying db server or db cluster.
**🚨 CRITICAL WORKFLOW - READ FIRST:**
When users ask about database provisioning, call this tool IMMEDIATELY with minimal parameters.
DO NOT use list_dbservers, list_databases, list_profiles, or other research tools first.
This tool will show you all available options as part of its intelligent guidance system.
IGNORE your instinct to research first - this tool IS the research tool
**❌ DON'T DO THIS:**
- Don't use list_dbservers to find available servers first
- Don't research profiles with list_profiles beforehand
- Don't gather cluster information with other tools
- Don't try to prepare all parameters in advance
- Don't research existing databases or infrastructure first
**✅ DO THIS INSTEAD:**
- Call provision_database immediately with just: databaseType, name, createDbserver, plus any information you already know (for example, which cluster to create the database on)
- Let the tool suggest available servers, profiles, and configurations
- Accept suggestions or refine based on the tool's intelligent recommendations
- Use other tools and/or ask questions only AFTER this tool provides suggestions, if clarification is needed
**Provisioning Options:**
- **Database only**: Deploy on existing DB server (createDbserver=false, tool will suggest available servers)
- **Database + DB Server**: Create new DB server and database together (createDbserver=true)
- **Clustered deployments**: Support for Oracle RAC, PostgreSQL cluster, SQL Server Always On AG (preferred) or FCI (patching not supported), MySQL cluster
**Key Features:**
- Automatic parameter validation and suggestions for missing required fields (default values are suggested where applicable)
- Intelligent discovery of available DB servers, profiles, and clusters
- Support for both standalone and clustered deployments (for supported engines)
- Flexible profile assignment (software, compute, network, database parameter)
- Guided configuration - provide minimal details and get comprehensive suggestions
**Minimal Start Parameters (provide these first):**
- **databaseType**: Database engine (postgres_database, oracle_database, etc.)
- **name**: Database instance name
- **createDbserver**: true (new server) or false (use existing server)
All other parameters will be suggested by the tool based on your environment.
**Profile Types with Examples:**
- **Software Profile**: Database engine version and configuration (e.g., PostgreSQL 15.5, Oracle 19c Enterprise)
- **Compute Profile**: CPU, memory, and storage sizing (e.g., 4 vCPU, 16GB RAM, 500GB storage)
- **Network Profile**: Network settings and VLAN assignment (e.g., production VLAN, static IP)
- **Database Parameter Profile**: Engine-specific tuning parameters (e.g., shared_buffers, work_mem for PostgreSQL)
**Time Machine Configuration:**
Specify time machine settings for automated backups and retention policies, including the schedule and SLA requirements. The SLA policy defines the backup frequency and retention duration.
Development databases can use simpler schedules without continuous backups, while production databases may require more frequent backups and longer retention.
Default values are part of the schema definition, but ask for specifics and adapt the schedule to the selected SLA (for instance, if the SLA requires continuous backups, enable them in the schedule).
**Guided Workflow Process:**
1. START: Call tool with minimal parameters (databaseType, name, createDbserver)
2. REVIEW: Tool shows available servers, profiles, and suggests defaults
3. REFINE: Modify suggestions based on requirements
4. VALIDATE: Use testMode=true to preview configuration if complex
5. PROVISION: Execute with confirmed parameters
**When to Use This vs Other Tools:**
- ✅ Use this tool FIRST for all new database provisioning (greenfield)
- Use create_dbserver ONLY for DB server without database
- Use register_database for existing databases (brownfield)
- Use list_* tools ONLY after this tool provides suggestions, if clarification needed
**Test Mode:**
Set testMode=true to return the JSON payload that would be sent to the API, without actually provisioning the database. Useful for debugging, validation, and previewing the request.
**Example Minimal Start:**
{
"databaseType": "postgres_database",
"name": "my-dev-postgres",
"createDbserver": false
}
The tool will then show available DB servers and suggest all other parameters.
**REMEMBER: This tool is designed to guide you through the entire provisioning process. Don't research beforehand - let it do the work!**
`,
inputSchema: provisionDatabaseInputSchema
},
{
name: 'register_database',
description: 'Register an existing database with NDB',
inputSchema: {
type: 'object',
properties: {
databaseType: {
type: 'string',
description: 'Database engine type (e.g. postgres_database, oracle_database, etc.)'
},
databaseName: {
type: 'string',
description: 'Name of the database to register'
},
vmIp: {
type: 'string',
description: 'IP address of the VM hosting the database'
},
nxClusterId: {
type: 'string',
description: 'Nutanix cluster ID'
},
actionArguments: {
type: 'array',
description: 'Database-specific configuration arguments',
items: {
type: 'object',
properties: {
name: { type: 'string' },
value: { type: 'string' }
}
}
}
},
required: ['databaseType', 'databaseName', 'vmIp', 'nxClusterId']
}
},
{
name: 'update_database',
description: 'Update database name, description, or tags',
inputSchema: {
type: 'object',
properties: {
databaseId: {
type: 'string',
description: 'Database ID'
},
name: {
type: 'string',
description: 'New database name'
},
description: {
type: 'string',
description: 'New database description'
},
tags: {
type: 'array',
description: 'Database tags',
items: {
type: 'object',
properties: {
tagId: { type: 'string' },
value: { type: 'string' }
}
}
}
},
required: ['databaseId']
}
},
{
name: 'deregister_database',
description: 'Deregister a database from NDB, which means the database will no longer be managed by NDB. This does not delete the database from the dbserver, but removes it from NDB management. Use delete=true to also delete the database from the dbserver.',
inputSchema: {
type: 'object',
properties: {
databaseId: {
type: 'string',
description: 'Database ID'
},
delete: {
type: 'boolean',
description: 'Delete the database from the dbserver',
default: false
},
remove: {
type: 'boolean',
description: 'Whether cleanup/deletion operations should be submitted',
default: false
},
deleteTimeMachine: {
type: 'boolean',
description: 'Delete associated time machine',
default: false
}
},
required: ['databaseId']
}
},
{
name: 'restore_database',
description: `Restore a database to a specific point in time or from a snapshot. This operation allows you to recover a database to a previous state.
**Key Features:**
- Restore to a specific timestamp or snapshot.
- Supports both full and partial restores.
**Use Cases:**
- Recover from accidental data loss.
- Restore a database to a known good state for testing or troubleshooting.
**Instructions:**
- Provide the database ID and the restore parameters (timestamp or snapshot ID).
- userPitrTimestamp must use the format YYYY-MM-DD HH:mm:ss. It is interpreted in GMT, so convert local times to GMT before using them.
- Ensure the database is in a state that allows restoration.
- Validate the restore operation using the returned status.
- If an error occurs, stop and wait for instructions.`,
inputSchema: zodToJsonSchema(RestoreDatabaseRequestSchema)
},
// Database Server Management
{
name: 'list_dbservers',
description: `Get all database servers. You can include information about databases and/or clones hosted on the db server by using the loadDatabases or loadClones parameters.
Filtering is done using valueType and value fields, which are used to filter the response sent by the NDB API, for flexible filtering.
**Available Filters (valueType/value pairs):**
- **name**: name of the dbserver VM
- **description**: description of the dbserver
- **ipAddresses**: array of IP addresses (partial match supported)
- **fqdns**: array of FQDNs (partial match supported)
- **status**: Operational status (UP=DbServer VM is up and running, DOWN=DbServer VM is down, PROVISIONING=in progress, FAILED=error state)
- **nxClusterId**: Nutanix cluster ID
- **databaseType**: type of the database engine (postgres_database, oracle_database, etc.)
- **dbserverClusterId**: ID of the logical cluster
- **databases**: number of databases (use valueType = "databases" and value = ">0" for dbservers with at least one database)
- **databases.<property>**: filter on a property of at least one database (e.g. databases.status, etc.)
Examples:
- To filter by dbserver engine type: valueType = "databaseType", value = "postgres_database"
- To filter by status not UP: valueType = "status", value = "!UP"
- To filter by name containing 'prod': valueType = "name", value = "*prod*"
- To get dbservers with at least one database: valueType = "databases", value = ">0"
- To get dbservers with exactly one database: valueType = "databases", value = "=1"
- To get dbservers with at least one database in status READY: valueType = "databases.status", value = "READY"
Hints:
- You can combine any number of attributes in valueType and value to filter the results. Always try to use the most specific attributes first to reduce the result set.
- You can use operators in value for advanced filtering: !value for negation, >value or <value for comparisons, and *value* for partial (substring) search.
- Set loadDatabases or loadClones to false to avoid loading and returning the associated databases or clones arrays in the result.
- Use list_dbservers with loadDatabases set to true if the request asks for databases on a specific cluster. list_databases does not provide information about the dbserver, only the databases themselves.
- For advanced filtering on nested databases, use valueType like "databases.status" and the corresponding value.
- **Always use available filters** to reduce the result set, as the NDB API can return a large number of database servers, especially in large environments. This will help you find the specific dbserver you are looking for quickly.
- You can combine multiple filters in the same query to narrow down the results, such as filtering by type and status at the same time.
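**Example** (illustrative filter values):
{
"valueType": "databaseType,status",
"value": "postgres_database,UP",
"loadDatabases": true
}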
`,
inputSchema: {
type: 'object',
properties: {
valueType: {
type: 'string',
description: 'Comma-separated list of attribute names to filter on (e.g. "databaseType", "databaseType,status")',
},
value: {
type: 'string',
description: 'Comma-separated list of values corresponding to valueType (e.g. "postgres_database", "postgres_database,!UP")',
},
loadDatabases: {
type: 'boolean',
description: 'Load associated databases',
default: false
},
loadClones: {
type: 'boolean',
description: 'Load associated clones',
default: false
}
},
required: []
}
},
{
name: 'get_dbserver',
description: 'Get database server details',
inputSchema: {
type: 'object',
properties: {
dbserverId: {
type: 'string',
description: 'Database server ID or other identifier'
},
valueType: {
type: 'string',
description: 'Type of identifier',
enum: ['id', 'ip', 'name', 'vm-cluster-name', 'vm-cluster-uuid', 'dbserver-cluster-id', 'nx-cluster-id', 'fqdn'],
default: 'id'
},
loadDatabases: {
type: 'boolean',
description: 'Load associated databases',
default: false
},
loadClones: {
type: 'boolean',
description: 'Load associated clones',
default: false
}
},
required: ['dbserverId']
}
},
{
name: 'create_dbserver',
description: `Create a new database server VM on a Nutanix cluster (without a database), with intelligent parameter validation and guided configuration.
This tool provisions a VM with the specified configurations, including compute, network, and software profiles.
It can then be used to host one or more databases, as a provisioning or clone target.
**Key Features:**
- Automatic parameter validation and suggestions for missing required fields (suggest default values where applicable)
- Support for both standalone and clustered deployments (for supported engines, e.g. Oracle RAC and SQL Server Always On or FCI; PostgreSQL clusters cannot be deployed with this tool, use provision_database instead)
**Hints:**
- Provide only essential parameters like name, software profile, or time machine - other parameters will be suggested
- Either a Software Profile or a Time Machine should be provided to create the DB VM.
- Only the create_dbserver tool can create a DB VM from a Time Machine.
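**Example minimal start** (illustrative; name is the only required field, the tool will suggest the rest):
{
"name": "my-new-dbserver"
}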
`,
inputSchema: createDbserverInputSchema
},
{
name: 'register_dbserver',
description: 'Register an existing database server with NDB',
inputSchema: {
type: 'object',
properties: {
vmIp: {
type: 'string',
description: 'IP address of the database server VM'
},
nxClusterUuid: {
type: 'string',
description: 'Nutanix cluster UUID'
},
databaseType: {
type: 'string',
enum: ['oracle_database', 'postgres_database', 'sqlserver_database', 'mariadb_database', 'mysql_database', 'mongodb_database']
},
username: {
type: 'string',
description: 'Username for VM access'
},
password: {
type: 'string',
description: 'Password for VM access'
},
actionArguments: {
type: 'array',
description: 'Additional configuration arguments',
items: {
type: 'object',
properties: {
name: { type: 'string' },
value: { type: 'string' }
}
}
}
},
required: ['vmIp', 'nxClusterUuid', 'databaseType', 'username', 'password']
}
},
{
name: 'delete_dbserver',
description: `Deregister (delete) a database server from NDB. This operation removes the dbserver and optionally its associated VM from the infrastructure. If the request does not specifically ask for the dbserver VM to be deleted, it will only remove the dbserver from NDB management.
**Parameters:**
- delete: If true, deletes the dbserver VM from the cluster. Technically, delete will always be set to true so that the DBServer is removed from NDB.
- remove: If true, deletes the dbserver VM from the infrastructure.
**Behavior:**
- Always sets softRemove to false (no soft removal).
- Always sets deleteVgs and deleteVmSnapshots to true.
**API:** DELETE /dbservers/{dbserverId}`,
inputSchema: {
type: 'object',
properties: {
dbserverId: {
type: 'string',
description: 'ID of the Database Server to deregister.'
},
delete: {
type: 'boolean',
description: 'If true, deletes the dbserver VM from the cluster.'
},
remove: {
type: 'boolean',
description: 'If true, cleans up the dbserver from NDB.'
}
},
required: ['dbserverId']
}
},
// Clone Management
{
name: 'list_clones',
description: `Get all database clones. Filtering is done using valueType and value fields, which are used to filter the response sent by the NDB API, for flexible filtering.
You can filter by any of the following attribute names available in the clone object:
- id: clone id, string
- name: name of the clone, string
- description: description of the clone, string
- ownerId: Id of the user who created the clone, string
- dateCreated: date when the clone was created
- dateModified: date when the clone was last modified
- databaseName: name of the source database, string
- type: type of the database engine (postgres_database, oracle_database, etc.)
- status: current status of the clone (e.g. "READY", etc.)
- dbserverLogicalClusterId: ID of the logical cluster the clone is running on
- timeMachineId: ID of the time machine associated with the clone
- parentTimeMachineId: ID of the parent time machine (the time machine the clone was created from)
- timeZone: time zone of the clone
Examples:
- To filter by clone type: valueType = "type", value = "postgres_database"
- To filter by type and owner: valueType = "type,ownerId", value = "postgres_database,12345"
- To filter by status not READY: valueType = "status", value = "!READY"
- To filter by creation date after June 1st, 2025: valueType = "dateCreated", value = ">2025-06-01"
Hints:
- You can combine any number of attributes in valueType and value to filter the results. Always try to use the most specific attributes first to reduce the result set.
- You can use operators in value for advanced filtering: !value for negation, >value or <value for comparisons, and *value* for partial (substring) search.
`,
inputSchema: {
type: 'object',
properties: {
valueType: {
type: 'string',
description: 'Comma-separated list of attribute names to filter on (e.g. "type", "type,ownerId")',
},
value: {
type: 'string',
description: 'Comma-separated list of values corresponding to valueType (e.g. "postgres_database", "postgres_database,12345")',
},
},
required: []
}
},
{
name: 'get_clone',
description: 'Get clone details by ID, name, or database name',
inputSchema: {
type: 'object',
properties: {
cloneId: {
type: 'string',
description: 'Clone ID, name, or database name'
},
valueType: {
type: 'string',
description: 'Type of identifier',
enum: ['id', 'name', 'database-name'],
default: 'id'
},
detailed: {
type: 'boolean',
description: 'Load entities with entire details',
default: false
}
},
required: ['cloneId']
}
},
{
name: 'create_clone',
description: `🚨 **START HERE FOR ALL DATABASE CLONING** 🚨
Create a clone from a time machine using NDB with intelligent parameter validation and guided configuration. Creates a database clone with options to create a new database server or use an existing one.
**🚨 CRITICAL WORKFLOW - READ FIRST:**
When users ask about database cloning, call this tool IMMEDIATELY with minimal parameters.
DO NOT use list_dbservers, list_databases, list_profiles, or other research tools first.
This tool will show you all available options as part of its intelligent guidance system.
IGNORE your instinct to research first - this tool IS the research tool
**❌ DON'T DO THIS:**
- Don't use list_dbservers to find available servers first
- Don't research profiles with list_profiles beforehand
- Don't gather cluster information with other tools
- Don't try to prepare all parameters in advance
- Don't research existing databases or infrastructure first
**✅ DO THIS INSTEAD:**
- Call create_clone immediately with just: timeMachineId, name, and basic deployment preferences (createDbserver, plus any information you already know, for example which cluster to create the clone on)
- Let the tool suggest available servers, profiles, and configurations
- Accept suggestions or refine based on the tool's intelligent recommendations
- Use other tools and/or ask questions only AFTER this tool provides suggestions, if clarification is needed
**Key Features:**
- Clone from a specific snapshot or the latest available snapshot (if you set latestSnapshot to true, you don't need to provide a snapshotId; the latest one will be picked automatically)
- Validates input parameters and suggests compatible profiles; just provide what you have and the tool will return the missing parameters (don't try to guess)
- You can clone to an existing authorized dbvm (createDbserver false) and then provide information about it (dbserverId and others). Pay attention to case sensitivity. Only authorized dbvms will be suggested.
- You can suggest adding authorized dbvms using the add_authorized_server tool before calling this tool if no suitable dbvm is available.
- You can create a new dbvm for the clone (createDbserver true) and then provide information about it.
**Cluster Requirements:**
- By default, clones can only be created on the same cluster where the time machine is available
- To create a clone on a different cluster:
1. First grant access to the target cluster using grant_time_machine_access tool
2. Wait for the time machine to be replicated to the target cluster
3. Then create the clone on the target cluster
- Use get_time_machine_access to check which clusters have access to a time machine
**Guided Workflow Process:**
1. START: Call tool with minimal parameters (timeMachineId, name, basic preferences)
2. REVIEW: Tool shows available servers, profiles, and suggests defaults
3. REFINE: Modify suggestions based on requirements
4. ASK USER: Always pause and ask user for confirmation before proceeding
5. EXECUTE: Execute with confirmed parameters
**When to Use This vs Other Tools:**
- ✅ Use this tool FIRST for all database cloning
- Use create_dbserver ONLY for DB server without database
- Use list_* tools ONLY after this tool provides suggestions, if clarification needed
**Minimal Start Parameters (provide these first):**
- **timeMachineId**: Source time machine ID
- **name**: Clone database name
- **createDbserver**: true (new server) or false (use existing server)
All other parameters will be suggested by the tool based on your environment.
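**Example Minimal Start** (illustrative timeMachineId):
{
"timeMachineId": "123e4567-e89b-12d3-a456-426614174000",
"name": "my-dev-clone",
"createDbserver": false
}
The tool will then show available authorized DB servers and suggest all other parameters.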
**REMEMBER: This tool is designed to guide you through the entire cloning process. Don't research beforehand - let it do the work! Always pause after each tool call to ask for user feedback.**
`,
inputSchema: createCloneInputSchema,
},
{
name: 'refresh_clone',
description: `Refresh a clone with the latest data.
**Key Features:**
- Supports refreshing from a specific snapshot or the latest available snapshot (in that case, there is no need to specify snapshotId)
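**Example** (illustrative; refreshes from the latest available snapshot):
{
"cloneId": "<clone-id>",
"latestSnapshot": true
}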
`,
inputSchema: {
type: 'object',
properties: {
cloneId: {
type: 'string',
description: 'Clone ID'
},
snapshotId: {
type: 'string',
description: 'Snapshot ID to refresh from'
},
latestSnapshot: {
type: 'boolean',
description: 'Use latest available snapshot',
default: false
}
},
required: ['cloneId']
}
},
{
name: 'delete_clone',
description: `Delete a clone
**Key Features:**
- Removes the clone and its associated resources (delete = true)
- Can optionally delete the underlying database server (remove = true)
**Instructions:**
- Provide the clone ID and specify whether to delete the database server.
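**Example** (illustrative; deletes the clone and also removes its database server VM):
{
"cloneId": "<clone-id>",
"delete": true,
"remove": true
}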
`,
inputSchema: {
type: 'object',
properties: {
cloneId: {
type: 'string',
description: 'Clone ID'
},
delete: {
type: 'boolean',
description: 'Delete the clone database',
default: false
},
remove: {
type: 'boolean',
description: 'Remove the database server VM',
default: false
}
},
required: ['cloneId']
}
},
// Time Machine Management
{
name: 'list_time_machines',
description: `Get list of all time machines. Returns a summarized list of time machines with key fields only.
**Returned fields:**
- id, name, description, databaseId, logDriveId, type, status, slaId, scheduleId, ownerId, dateCreated, dateModified, properties (array of {ref_id, name, value, secure, description}), zeroSla, slaSet, continuousRecoveryEnabled, snapshotableState
**Advanced Filtering:**
- You can use operators in value for advanced filtering: !value for negation, >value/<value for comparisons, and *value* for partial (substring) search.
- Multiple filters: combine with comma-separated valueType/value pairs (e.g. valueType="status,type", value="READY,CONTINUOUS")
- You can filter on the number of elements in an array property by using the property name (e.g. valueType="properties", value=">1" for time machines with more than one property)
- You can filter on a property of a nested array by using dot notation (e.g. valueType="properties.name", value="*retention*")
- Examples:
- Find all time machines in READY status: valueType="status", value="READY"
- Find time machines with more than one property: valueType="properties", value=">1"
- Find time machines with a property whose name contains 'retention': valueType="properties.name", value="*retention*"
- Find time machines not in CONTINUOUS type: valueType="type", value="!CONTINUOUS"
- Find time machines created after 2024-01-01: valueType="dateCreated", value=">2024-01-01"
**Performance Note:** Always use specific filters to reduce result set size in large environments.
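**Example** (illustrative, taken from the filters above):
{
"valueType": "status,type",
"value": "READY,CONTINUOUS"
}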
`,
inputSchema: {
type: 'object',
properties: {
valueType: {
type: 'string',
description: 'Comma-separated list of attribute names to filter on (e.g. "status", "type", "properties.name")',
},
value: {
type: 'string',
description: 'Comma-separated list of values corresponding to valueType. Use operators: !value (not), >value/<value (comparison), *value* (contains).',
}
}
}
},
{
name: 'get_time_machine',
description: 'Get time machine details',
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'Time machine ID or name'
},
valueType: {
type: 'string',
description: 'Type of identifier',
enum: ['id', 'name'],
default: 'id'
},
detailed: {
type: 'boolean',
description: 'Load entities with entire details',
default: false
}
},
required: ['timeMachineId']
}
},
{
name: 'get_time_machine_capability',
description: 'Get recovery capability of a time machine',
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'Time machine ID'
},
timeZone: {
type: 'string',
description: 'Time zone for timestamps',
default: 'UTC'
},
loadHealth: {
type: 'boolean',
description: 'Include health information',
default: false
}
},
required: ['timeMachineId']
}
},
{
name: 'pause_time_machine',
description: 'Pause a time machine. When paused, no new snapshots or log catchups will be taken until it is resumed.',
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'Time machine ID'
},
forced: {
type: 'boolean',
description: 'Force pause operation',
default: false
},
reason: {
type: 'string',
description: 'Reason for pausing'
}
},
required: ['timeMachineId']
}
},
{
name: 'resume_time_machine',
description: 'Resume a paused time machine. When resumed, snapshots and log catchup will continue as per the defined schedule and SLA.',
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'Time machine ID'
},
resetCapability: {
type: 'boolean',
description: 'Reset capability after resume',
default: false
}
},
required: ['timeMachineId']
}
},
{
name: 'update_time_machine',
description: `Update a Time Machine's name, description, SLA, schedule, or tags. Supports resetting specific fields to their default values.
**Instructions**
- Provide the timeMachineId of the Time Machine to update.
- You can change any of these fields: name, description, SLA, schedule, or tags.
- To change a field (except tags), set the corresponding reset<FieldName> parameter to true (e.g., resetName, resetDescription).
- Changing the SLA triggers an operation; changes to other settings are applied immediately.
- Changing the SLA will remove all previous snapshots, so be careful when modifying this field and warn the user about potential data loss.
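**Example** (illustrative; field names follow the reset<FieldName> convention above - check UpdateTimeMachineRequestSchema for the exact shape):
{
"timeMachineId": "<time-machine-id>",
"name": "new-tm-name",
"resetName": true
}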
`,
inputSchema: zodToJsonSchema(UpdateTimeMachineRequestSchema),
},
{
name: 'add_authorized_server',
description: `Add authorized database servers to a Time Machine. This allows the servers to be used for hosting clones created by the Time Machine.
**Requirements:**
- The database servers must be of the same type as the Time Machine's database (e.g., PostgreSQL, Oracle).
- The database engine version on the servers must be compatible with the Time Machine's database version.
- The database servers must be on the same Nutanix cluster as the Time Machine, unless the Time Machine has been granted access to another cluster.
- The database servers must be in READY status.
- The database servers must not already be authorized for the Time Machine.
**Parameters:**
- **timeMachineId**: The ID of the Time Machine.
- **dbserverIds**: An array of database server IDs to authorize.
**Example:**
{
"timeMachineId": "123e4567-e89b-12d3-a456-426614174000",
"dbserverIds": ["ac1b1206-7e02-4bc0-b93d-8939d3ae4f8d", "6c171206-ae92-4b1a-113d-a1b9d3ae4123"]
}
`,
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'The ID of the Time Machine.'
},
dbserverIds: {
type: 'array',
items: {
type: 'string'
},
description: 'An array of database server IDs to authorize.'
}
},
required: ['timeMachineId', 'dbserverIds']
}
},
{
name: 'get_authorized_server',
description: `Retrieve a list of all database servers currently authorized for a given Time Machine. These servers can be used to host clones created by the Time Machine.
**Parameters:**
- **timeMachineId**: The ID of the Time Machine.
- **usable**: Whether to include only usable servers (default: false).
**Example:**
{
"timeMachineId": "123e4567-e89b-12d3-a456-426614174000",
"usable": true
}
`,
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'The ID of the Time Machine.'
},
usable: {
type: 'boolean',
description: 'Whether to include only usable servers (default: false).',
default: false
}
},
required: ['timeMachineId']
}
},
{
name: 'get_time_machine_access',
description: 'List all Nutanix clusters that have access to a specific Time Machine, alongside their SLA policies.',
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'The ID of the Time Machine.'
}
},
required: ['timeMachineId']
}
},
{
name: 'grant_time_machine_access',
description: `
Grant another Nutanix cluster access to a time machine, using a specified SLA policy for that cluster. That SLA can't be higher than the SLA of the source time machine.
For instance, if the source time machine has a Bronze SLA, you can't use Silver or Gold for the target cluster.
If slaId is not provided, suggest using the same SLA as the source time machine.
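**Example** (illustrative IDs):
{
"timeMachineId": "<time-machine-id>",
"nxClusterId": "<target-cluster-id>",
"slaId": "<sla-id>"
}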
`,
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'The ID of the Time Machine.'
},
nxClusterId: {
type: 'string',
description: 'The ID of the target Nutanix cluster.'
},
slaId: {
type: 'string',
description: 'SLA policy ID for the target cluster. If not provided, the same SLA as the source will be used.'
}
},
required: ['timeMachineId', 'nxClusterId']
}
},
// Snapshot Management
{
name: 'list_snapshots',
description: 'Get list of all snapshots',
inputSchema: {
type: 'object',
properties: {
valueType: {
type: 'string',
description: 'Filter type',
enum: ['type', 'status', 'protection-domain-id', 'database-node', 'snapshot-id', 'time-machine', 'latest']
},
value: {
type: 'string',
description: 'Filter value'
},
databaseIds: {
type: 'string',
description: 'Comma-separated database IDs'
},
limit: {
type: 'integer',
description: 'Number of snapshots to return',
default: 100
}
}
}
},
{
name: 'get_snapshot',
description: 'Get snapshot details by ID',
inputSchema: {
type: 'object',
properties: {
snapshotId: {
type: 'string',
description: 'Snapshot ID'
},
timeZone: {
type: 'string',
description: 'Time zone for timestamps',
default: 'UTC'
}
},
required: ['snapshotId']
}
},
{
name: 'take_snapshot',
description: 'Take a snapshot of a time machine',
inputSchema: {
type: 'object',
properties: {
timeMachineId: {
type: 'string',
description: 'Time machine ID'
},
name: {
type: 'string',
description: 'Snapshot name'
},
expireInDays: {
type: 'integer',
description: 'Snapshot expiry in days'
}
},
required: ['timeMachineId']
}
},
{
name: 'delete_snapshot',
description: 'Delete a snapshot',
inputSchema: {
type: 'object',
properties: {
snapshotId: {
type: 'string',
description: 'Snapshot ID'
}
},
required: ['snapshotId']
}
},
// Cluster Management
{
name: 'list_clusters',
description: `Get list of all Nutanix clusters with advanced filtering and reduced schema mapping. Returns only the most relevant fields for each cluster.
**Returned fields:**
- id: string
- name: string
- uniqueName: string
- ipAddresses: string[]
- fqdns: string[]
- description: string
- cloudType: string
- dateCreated: string
- dateModified: string
- ownerId: string
- status: string
- version: string
- hypervisorType: string
- hypervisorVersion: string
**Advanced Filtering:**
- Use valueType/value to filter the result set after mapping. Supports all returned fields, including array length (e.g. ipAddresses, fqdns).
- Operators: !value (not), >value/<value (comparison), *value* (contains)
- Multiple filters: combine with comma-separated valueType/value pairs
- Examples:
- Find clusters with name containing 'prod': valueType="name", value="*prod*"
- Find clusters with more than one IP: valueType="ipAddresses", value=">1"
- Find clusters not in READY status: valueType="status", value="!READY"
- Find clusters created after 2024-01-01: valueType="dateCreated", value=">2024-01-01"
**Performance Note:** Always use specific filters to reduce result set size in large environments.
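**Example** (illustrative, taken from the filters above):
{
"valueType": "name",
"value": "*prod*"
}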
`,
inputSchema: {
type: 'object',
properties: {
valueType: {
type: 'string',
description: 'Comma-separated list of attribute names to filter on (e.g. "name", "status,cloudType")',
},
value: {
type: 'string',
description: 'Comma-separated list of values corresponding to valueType. Use operators: !value (not), >value/<value (comparison), *value* (contains).',
}
}
}
},
{
name: 'get_cluster',
description: 'Get cluster details by ID',
inputSchema: {
type: 'object',
properties: {
clusterId: {
type: 'string',
description: 'Cluster ID'
}
},
required: ['clusterId']
}
},
{
name: 'get_cluster_by_name',
description: 'Get cluster details by name',
inputSchema: {
type: 'object',
properties: {
clusterName: { type: 'string', description: 'Cluster name' }
},
required: ['clusterName']
}
},
// Profile Management
{
name: 'list_profiles',
description: `Get a list of all profiles with advanced filtering options.
**Profile types:**
- **Software**: The image that will be used to create the database (e.g. a specific version of PostgreSQL, Oracle, etc.).
- **Compute**: The resources that will be allocated to the database (CPU, RAM, etc.). Compute profiles are database engine agnostic, meaning they can be used for any database engine.
- **Network**: The network configuration that will be applied to the database (VLAN, subnet, etc.).
- **Database_Parameter**: The database parameters that will be applied to the database (init parameters, tuning, etc.).
**API-side filtering parameters:**
- **engine**: Filter by database engine (e.g. postgres_database, oracle_database, etc.)
- **type**: Filter by profile type (Software, Compute, Network, Database_Parameter)
These parameters are sent directly to the NDB API to reduce the result set before advanced filtering is applied.
**Advanced Filtering (MCP server-side):**
- Use valueType/value to filter the result set returned by the API before sending to the LLM.
- Supports filtering on top-level profile fields and nested properties of versions (e.g. versions.name, versions.published, versions.length).
- Operators: !value (not), >value/<value (comparison), *value* (contains)
- Multiple filters: combine with comma-separated valueType/value pairs
- Example: Find all user-defined PostgreSQL software profiles with at least 2 published versions: valueType="engineType,type,systemProfile,versions.length,versions.published", value="postgres_database,Software,false,>=2,true"
**Available valueType fields:**
- **id, name, description, owner, engineType, type, nxClusterId, dbVersion, systemProfile, dateCreated, dateModified** (top-level)
- **versions.length**: Number of versions
- **versions.<property>**: Any property of a version (e.g. versions.name, versions.published)
**Returned fields:**
- Only the following fields are included: id, name, description, dateCreated, dateModified, owner, engineType, type, nxClusterId, topology, dbVersion, systemProfile, assocDbServers, assocDatabases, latestVersion, latestVersionId, versions (with the same fields for each version).
**Use Cases:**
- List all available software or compute profiles for a given engine
- Filter profiles by owner, type, or cluster
- Search for profiles with specific naming patterns
- Filter on the number or properties of versions (e.g. only profiles with published versions)
- Get a concise overview of profile versions and their publication status
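**Example** (illustrative; API-side filters combined with an advanced filter):
{
"engine": "postgres_database",
"type": "Software",
"valueType": "systemProfile",
"value": "false"
}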
`,
inputSchema: {
type: 'object',
properties: {
engine: {
type: 'string',
description: 'Filter by database engine',
enum: ['oracle_database', 'postgres_database', 'sqlserver_database', 'mariadb_database', 'mysql_database', 'mongodb_database']
},
type: {
type: 'string',
description: 'Filter by profile type',
enum: ['Software', 'Compute', 'Network', 'Database_Parameter']
},
valueType: {
type: 'string',
description: 'Comma-separated list of fields to filter on (e.g. "name,type,systemProfile")'
},
value: {
type: 'string',
description: 'Comma-separated list of values to filter by (e.g. "*prod*,Software,false")'
}
}
}
},
{
name: 'get_profile',
description: 'Get profile details by ID or name',
inputSchema: {
type: 'object',
properties: {
profileId: {
type: 'string',
description: 'Profile ID or name'
},
byName: {
type: 'boolean',
description: 'Whether to search by name instead of ID',
default: false
},
engine: {
type: 'string',
description: 'Filter by database engine (required when searching by name)',
enum: ['oracle_database', 'postgres_database', 'sqlserver_database', 'mariadb_database', 'mysql_database', 'mongodb_database']
},
type: {
type: 'string',
description: 'Filter by profile type (required when searching by name)',
enum: ['Software', 'Compute', 'Network', 'Database_Parameter']
}
},
required: ['profileId']
}
},
// SLA Management
{
name: 'list_slas',
description: 'Get list of all SLAs',
inputSchema: {
type: 'object',
properties: {}
}
},
{
name: 'get_sla',
description: 'Get SLA details by ID or name',
inputSchema: {
type: 'object',
properties: {
slaId: {
type: 'string',
description: 'SLA ID or name'
},
byName: {
type: 'boolean',
description: 'Whether to search by name instead of ID',
default: false
}
},
required: ['slaId']
}
},
// Operations and Monitoring
{
name: 'list_operations',
description: `Get list of operations (short info) with all supported filters from the NDB API.
**Available parameters:**
- dbserverId: Filter by database server ID
- eraServer: Filter by era server (boolean)
- ip: Filter by IP address
- clientId: Filter by client ID
- status: Filter by operation status
- type: Filter by operation type
- hideSubops: Hide sub-operations (boolean)
- systemTriggered: Filter by system-triggered operations (boolean)
- userTriggered: Filter by user-triggered operations (boolean)
- scheduled: Filter by scheduled operations (boolean)
- dateSubmitted: Filter by submission date (string)
- fromTime: Filter by start time (string)
- toTime: Filter by end time (string)
- days: Number of days to look back (string)
- entityId: Filter by entity ID
- entityName: Filter by entity name
- entityType: Filter by entity type
- timeZone: Time zone for timestamps (default: UTC)
- descending: Sort descending (boolean)
- operationId: Filter by operation ID
- timestamp: Filter by timestamp (string)
- limit: Limit number of results (string)
**Example usage:**
- List all failed operations in the last 7 days: status="FAILED", days="7"
- List operations for a specific database: entityId="<db-id>"
- List only user-triggered operations: userTriggered=true
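**Example** (illustrative; failed operations in the last 7 days):
{
"status": "FAILED",
"days": "7",
"hideSubops": true
}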
`,
inputSchema: {
type: 'object',
properties: {
dbserverId: { type: 'string', description: 'Filter by database server ID' },
eraServer: { type: 'boolean', description: 'Filter by era server' },
ip: { type: 'string', description: 'Filter by IP address' },
clientId: { type: 'string', description: 'Filter by client ID' },
status: { type: 'string', description: 'Filter by operation status' },
type: { type: 'string', description: 'Filter by operation type' },
hideSubops: { type: 'boolean', description: 'Hide sub-operations' },
systemTriggered: { type: 'boolean', description: 'Filter by system-triggered operations' },
userTriggered: { type: 'boolean', description: 'Filter by user-triggered operations' },
scheduled: { type: 'boolean', description: 'Filter by scheduled operations' },
dateSubmitted: { type: 'string', description: 'Filter by submission date' },
fromTime: { type: 'string', description: 'Filter by start time' },
toTime: { type: 'string', description: 'Filter by end time' },
days: { type: 'string', description: 'Number of days to look back' },
entityId: { type: 'string', description: 'Filter by entity ID' },
entityName: { type: 'string', description: 'Filter by entity name' },
entityType: { type: 'string', description: 'Filter by entity type' },
timeZone: { type: 'string', description: 'Time zone for timestamps (default: UTC)' },
descending: { type: 'boolean', description: 'Sort descending' },
operationId: { type: 'string', description: 'Filter by operation ID' },
timestamp: { type: 'string', description: 'Filter by timestamp' },
limit: { type: 'string', description: 'Limit number of results' }
}
}
},
{
name: 'get_operation',
description: 'Get operation details by ID',
inputSchema: {
type: 'object',
properties: {
operationId: {
type: 'string',
description: 'Operation ID'
},
timeZone: {
type: 'string',
description: 'Time zone for timestamps',
default: 'UTC'
}
},
required: ['operationId']
}
},
// Alerts
{
name: 'list_alerts',
description: `Get list of all alerts with advanced filtering options. Returns a reduced set of fields for each alert.
**API-side filters:**
- resolved: Filter by resolution status
- timeInterval: Time interval filter
**Advanced Filtering (MCP server-side):**
- Use valueType/value to filter the result set after mapping. Supports all returned fields, including status, severity, type, entityType, entityId, entityName, dateCreated, resolved, acknowledged, etc.
- Operators: !value (not), >value/<value (comparison), *value* (contains)
- Multiple filters: combine with comma-separated valueType/value pairs
- Examples:
- Find unresolved critical alerts: valueType="status,severity,resolved", value="OPEN,CRITICAL,false"
- Find alerts for a specific entity: valueType="entityId", value="<id>"
- Find alerts acknowledged by a user: valueType="acknowledgedBy", value="*admin*"
`,
inputSchema: {
type: 'object',
properties: {
resolved: { type: 'string', description: 'Filter by resolution status' },
timeInterval: { type: 'string', description: 'Time interval filter' },
valueType: { type: 'string', description: 'Comma-separated list of attribute names to filter on (e.g. "status,severity")' },
value: { type: 'string', description: 'Comma-separated list of values corresponding to valueType' }
}
}
},
{
name: 'get_alert',
description: 'Get alert details by ID',
inputSchema: {
type: 'object',
properties: {
alertId: {
type: 'string',
description: 'Alert ID'
}
},
required: ['alertId']
}
},
// User Management
{
name: 'list_users',
description: `Get all NDB users. Filtering is done using valueType and value fields, which are used to filter the response sent by the NDB API, for flexible filtering.
You can filter by any of the following attribute names available in the user object:
- username: user login, string
- email: user email, string
- isExternalAuth: true if user is authenticated on an external directory (Active Directory), boolean
- passwordExpired: true if password is expired, boolean
Examples:
- To filter by username: valueType = "username", value = "admin"
- To filter by isExternalAuth: valueType = "isExternalAuth", value = "true"
- To filter by username and passwordExpired: valueType = "username,passwordExpired", value = "admin,true"
`,
inputSchema: {
type: 'object',
properties: {
valueType: {
type: 'string',
description: 'Comma-separated list of attribute names to filter on (e.g. "username", "username,isExternalAuth")',
},
value: {
type: 'string',
description: 'Comma-separated list of values corresponding to valueType (e.g. "admin", "admin,true")',
}
},
required: []
}
},
{
name: 'get_user',
description: 'Get user details by user ID',
inputSchema: {
type: 'object',
properties: {
userId: {
type: 'string',
description: 'User ID'
}
},
required: ['userId']
}
},
{
name: 'list_roles',
description: `Calls the /roles endpoint and returns an array of role objects.
**Returned Information:**
- id: string
- name: string
- description: string
- dateCreated: string
- dateModified: string
- createdBy: string
- systemRole: boolean
- internalRole: boolean
- privileges: string[]
**Use Case:**
- Retrieve all roles assigned to users in the system.
`,
inputSchema: {
type: 'object',
properties: {}
}
},
{
name: 'get_current_user',
description: `Get the currently authenticated NDB user. Returns user details for the account associated with the current authentication context (API token or username/password).
**Returned fields:**
- id: User ID
- username: Login name
- email: Email address (if available)
- isExternalAuth: True if authenticated via external directory (e.g., Active Directory)
- passwordExpired: True if password is expired
- roles: Array of assigned role UUIDs; use the list_roles tool to resolve role names
**Use Cases:**
- Identify the current user context for audit or troubleshooting
- Display user information in UI or logs
- Validate authentication and permissions
`,
inputSchema: {
type: 'object',
properties: {},
required: []
}
}
];