Skip to main content
Glama

maintenance

Perform system maintenance operations including cleaning expired database entries, removing old local files, checking storage quotas, and resolving database inconsistencies to optimize performance.

Instructions

Perform maintenance operations following workflows.md patterns.

Available operations:

  • cleanup_expired: Remove expired Files API entries from database

  • cleanup_local: Clean old local files based on age/LRU

  • check_quota: Check Files API storage usage vs. ~20GB budget

  • database_hygiene: Clean up database inconsistencies

  • full_cleanup: Run all cleanup operations in sequence

Input Schema

JSON Schema

| Name | Required | Description | Default |
|---|---|---|---|
| dry_run | No | If true, only report what would be done without making changes | true |
| keep_count | No | For local cleanup: minimum number of recent files to keep | — |
| max_age_hours | No | For local cleanup: maximum age in hours (default: 168 = 1 week) | — |
| operation | Yes | Maintenance operation to perform: 'cleanup_expired', 'cleanup_local', 'check_quota', 'database_hygiene', 'full_cleanup' | — |

Implementation Reference

  • The primary handler for the 'maintenance' tool. Defines input schema via Annotated Fields, dispatches to MaintenanceService based on 'operation' parameter, formats output summaries, and returns ToolResult.
    @server.tool( annotations={ "title": "Maintenance and cleanup operations", "readOnlyHint": True, "openWorldHint": True, } ) def maintenance( operation: Annotated[ str, Field( description="Maintenance operation to perform: " "'cleanup_expired', 'cleanup_local', 'check_quota', 'database_hygiene', 'full_cleanup'" ), ], dry_run: Annotated[ bool, Field(description="If true, only report what would be done without making changes"), ] = True, max_age_hours: Annotated[ Optional[int], Field( description="For local cleanup: maximum age in hours (default: 168 = 1 week)", ge=1, le=8760, ), ] = None, keep_count: Annotated[ Optional[int], Field( description="For local cleanup: minimum number of recent files to keep", ge=1, le=1000, ), ] = None, ctx: Context = None, ) -> ToolResult: """ Perform maintenance operations following workflows.md patterns. Available operations: - cleanup_expired: Remove expired Files API entries from database - cleanup_local: Clean old local files based on age/LRU - check_quota: Check Files API storage usage vs. ~20GB budget - database_hygiene: Clean up database inconsistencies - full_cleanup: Run all cleanup operations in sequence """ logger = logging.getLogger(__name__) try: logger.info(f"Maintenance operation: {operation}, dry_run={dry_run}") # Get services (would be injected in real implementation) maintenance_service = _get_maintenance_service() # Validate operation valid_operations = [ "cleanup_expired", "cleanup_local", "check_quota", "database_hygiene", "full_cleanup", ] if operation not in valid_operations: raise ValidationError( f"Invalid operation. 
Must be one of: {', '.join(valid_operations)}" ) # Execute maintenance operation if operation == "cleanup_expired": result = maintenance_service.cleanup_expired_files(dry_run=dry_run) summary = _format_expired_cleanup_summary(result, dry_run) elif operation == "cleanup_local": result = maintenance_service.cleanup_local_files( dry_run=dry_run, max_age_hours=max_age_hours or 168, # 1 week default keep_count=keep_count or 10, # Keep at least 10 recent files ) summary = _format_local_cleanup_summary(result, dry_run) elif operation == "check_quota": result = maintenance_service.check_storage_quota() summary = _format_quota_summary(result) elif operation == "database_hygiene": result = maintenance_service.database_hygiene(dry_run=dry_run) summary = _format_database_hygiene_summary(result, dry_run) elif operation == "full_cleanup": result = maintenance_service.full_maintenance_cycle( dry_run=dry_run, max_age_hours=max_age_hours or 168, keep_count=keep_count or 10 ) summary = _format_full_cleanup_summary(result, dry_run) else: # This shouldn't happen due to validation above raise ValidationError(f"Unhandled operation: {operation}") content = [TextContent(type="text", text=summary)] structured_content = { "operation": operation, "dry_run": dry_run, "workflow": "workflows.md_maintenance_sequence", "result": result, "parameters": {"max_age_hours": max_age_hours, "keep_count": keep_count}, } logger.info(f"Maintenance operation {operation} completed successfully") return ToolResult(content=content, structured_content=structured_content) except ValidationError as e: logger.error(f"Validation error in maintenance: {e}") raise except Exception as e: logger.error(f"Unexpected error in maintenance: {e}") raise
  • Function that registers the 'maintenance' tool on the FastMCP server instance using the @server.tool decorator, which also embeds the schema annotations.
    def register_maintenance_tool(server: FastMCP): """Register the maintenance tool with the FastMCP server.""" @server.tool( annotations={ "title": "Maintenance and cleanup operations", "readOnlyHint": True, "openWorldHint": True, } )
  • The site where register_maintenance_tool is invoked during server initialization in the _register_tools method, completing the tool registration.
    from ..tools.maintenance import register_maintenance_tool register_generate_image_tool(self.server) register_upload_file_tool(self.server) register_output_stats_tool(self.server) register_maintenance_tool(self.server)
  • Key helper method in MaintenanceService implementing concrete local filesystem cleanup logic (LRU/age-based), called by the tool handler for 'cleanup_local' operation.
    def cleanup_local_files( self, dry_run: bool = True, max_age_hours: int = 168, # 1 week keep_count: int = 10, ) -> Dict[str, Any]: """ Clean up old local files based on age and LRU policy. Implements: "Local LRU/age-based cleanup of OUT_DIR" Args: dry_run: If True, only report what would be cleaned max_age_hours: Files older than this are candidates for removal keep_count: Always keep at least this many recent files Returns: Dictionary with cleanup statistics """ try: self.logger.info( f"Starting local file cleanup (dry_run={dry_run}, " f"max_age_hours={max_age_hours}, keep_count={keep_count})" ) stats = { "total_files": 0, "removed_count": 0, "kept_count": 0, "freed_mb": 0.0, "errors": [], } # Get all image files in output directory image_files = [] for ext in ["*.jpg", "*.jpeg", "*.png", "*.webp"]: image_files.extend(Path(self.out_dir).glob(f"**/{ext}")) # Sort by modification time (newest first) image_files.sort(key=lambda p: p.stat().st_mtime, reverse=True) stats["total_files"] = len(image_files) # Cutoff time for old files cutoff_time = datetime.now() - timedelta(hours=max_age_hours) cutoff_timestamp = cutoff_time.timestamp() removed_count = 0 freed_bytes = 0 for i, file_path in enumerate(image_files): try: file_stat = file_path.stat() # Always keep the most recent files if i < keep_count: continue # Check if file is old enough to remove if file_stat.st_mtime > cutoff_timestamp: continue # Check if file is still referenced in database db_record = self.db_service.get_by_path(str(file_path)) if db_record and db_record.file_id: # File is still referenced in Files API, keep it self.logger.debug(f"Keeping referenced file: {file_path}") continue # File is eligible for removal file_size = file_stat.st_size if not dry_run: file_path.unlink() # Also remove corresponding thumbnail if it exists thumb_path = file_path.with_name(file_path.stem + "_thumb.jpeg") if thumb_path.exists(): thumb_size = thumb_path.stat().st_size thumb_path.unlink() file_size += thumb_size 
removed_count += 1 freed_bytes += file_size self.logger.debug(f"{'Would remove' if dry_run else 'Removed'}: {file_path}") except Exception as e: error_msg = f"Error processing {file_path}: {e}" stats["errors"].append(error_msg) self.logger.error(error_msg) stats["removed_count"] = removed_count stats["kept_count"] = stats["total_files"] - removed_count stats["freed_mb"] = freed_bytes / (1024 * 1024) self.logger.info(f"Local cleanup complete: {stats}") return stats except Exception as e: self.logger.error(f"Local files cleanup failed: {e}") return { "total_files": 0, "removed_count": 0, "kept_count": 0, "freed_mb": 0.0, "errors": [str(e)], }

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/zhongweili/nanobanana-mcp-server'

If you have feedback or need assistance with the MCP directory API, please join our Discord server