
optimize_memory

Optimizes a memory file by reorganizing and consolidating its entries while preserving all data. The optimization is AI-driven and runs within the MCP server, keeping memory files compact and easy to manage.

Instructions

Manually optimize a memory file using AI to reorganize and consolidate entries while preserving all information.

Input Schema

| Name | Required | Description | Default |
| --- | --- | --- | --- |
| `force` | No | Force optimization regardless of criteria | `False` |
| `memory_file` | No | Optional path to a specific memory file. If not provided, the user's main memory file is optimized. | `None` |
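
As a quick usage sketch, the tool could be invoked from Python with FastMCP's 2.x client API. The server script path is hypothetical; `memory_file` is omitted so the server falls back to the default user memory file:

```python
import asyncio
from fastmcp import Client  # assumes the FastMCP 2.x client package is installed

async def main() -> None:
    # Connect to the Mode Manager MCP server over stdio (script path is hypothetical).
    async with Client("mode_manager_server.py") as client:
        # Force optimization even if no threshold has been crossed.
        result = await client.call_tool("optimize_memory", {"force": True})
        print(result)

asyncio.run(main())
```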

Implementation Reference

  • The main handler function for the 'optimize_memory' MCP tool. It determines the target memory file, creates a MemoryOptimizer instance, calls its optimize_memory_if_needed method, and formats the result into a user-friendly string response.
```python
async def optimize_memory(
    ctx: Context,
    memory_file: Annotated[Optional[str], "Path to memory file to optimize"] = None,
    force: Annotated[bool, "Force optimization regardless of criteria"] = False,
) -> str:
    """Manually optimize a memory file using AI sampling."""
    if read_only:
        return "Error: Server is running in read-only mode"
    try:
        # Determine which file to optimize
        if memory_file:
            file_path = Path(memory_file)
            if not file_path.exists():
                return f"Error: Memory file not found: {memory_file}"
        else:
            # Use default user memory file
            user_memory_path = instruction_manager.get_memory_file_path()
            if not user_memory_path.exists():
                return "Error: No user memory file found to optimize"
            file_path = user_memory_path

        # Create optimizer and run optimization
        optimizer = MemoryOptimizer(instruction_manager)
        result = await optimizer.optimize_memory_if_needed(file_path, ctx, force=force)

        # Format result message
        status = result.get("status", "unknown")
        if status == "optimized":
            entries_before = result.get("entries_before", "unknown")
            entries_after = result.get("entries_after", "unknown")
            backup_created = result.get("backup_created", False)
            message = "✅ Memory optimization completed successfully!\n"
            message += f"📊 Entries: {entries_before} → {entries_after}\n"
            message += f"🔄 Method: {result.get('method', 'ai')}\n"
            message += f"💾 Backup created: {'Yes' if backup_created else 'No'}\n"
            message += f"📝 Reason: {result.get('reason', 'Manual optimization')}"
        elif status == "metadata_updated":
            message = "📝 Memory metadata updated (AI optimization unavailable)\n"
            message += f"💾 Backup created: {'Yes' if result.get('backup_created', False) else 'No'}\n"
            message += f"📝 Reason: {result.get('reason', 'Manual optimization')}"
        elif status == "skipped":
            message = f"⏭️ Optimization skipped: {result.get('reason', 'Unknown reason')}\n"
            message += "💡 Use force=True to optimize anyway"
        elif status == "error":
            message = f"❌ Optimization failed: {result.get('reason', 'Unknown error')}"
        else:
            message = f"🔍 Optimization result: {status}"
        return message
    except Exception as e:
        return f"Error during memory optimization: {str(e)}"
```
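
On success, the handler returns a plain-text summary built from the result dict. The entry counts below are illustrative, but the layout follows the format strings above:

```
✅ Memory optimization completed successfully!
📊 Entries: 34 → 21
🔄 Method: ai
💾 Backup created: Yes
📝 Reason: Forced optimization
```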
  • The FastMCP tool registration decorator for 'optimize_memory', including description, tags, parameter annotations (schema), and metadata.
```python
@app.tool(
    name="optimize_memory",
    description="Manually optimize a memory file using AI to reorganize and consolidate entries while preserving all information.",
    tags={"public", "memory"},
    annotations={
        "idempotentHint": False,
        "readOnlyHint": False,
        "title": "Optimize Memory File",
        "parameters": {
            "memory_file": "Optional path to specific memory file. If not provided, will optimize the user's main memory file.",
            "force": "Force optimization even if criteria are not met. Defaults to False.",
        },
        "returns": "Returns detailed results of the optimization process including status, entries before/after, and backup information.",
    },
    meta={
        "category": "memory",
    },
)
```
  • Core optimization logic in MemoryOptimizer class. Checks if optimization needed, performs AI-based reorganization of memory content, updates metadata, writes back to file with backup, handles errors and fallbacks.
```python
async def optimize_memory_if_needed(self, file_path: Path, ctx: Context, force: bool = False) -> Dict[str, Any]:
    """
    Main optimization method with full backward compatibility.

    Args:
        file_path: Path to memory file
        ctx: FastMCP context for AI sampling
        force: Force optimization regardless of criteria

    Returns:
        Dict with optimization results
    """
    try:
        # Get metadata (with backward compatibility)
        metadata = self._get_memory_metadata(file_path)

        # Check if optimization is needed
        if not force:
            should_optimize, reason = self._should_optimize_memory(file_path, metadata)
            if not should_optimize:
                return {"status": "skipped", "reason": reason, "metadata": metadata}
        else:
            reason = "Forced optimization"

        # Read current content
        frontmatter, content = parse_frontmatter_file(file_path)
        full_content = "---\n"
        for key, value in frontmatter.items():
            if isinstance(value, str) and ('"' in value or "'" in value):
                full_content += f'{key}: "{value}"\n'
            else:
                full_content += f"{key}: {value}\n"
        full_content += f"---\n{content}"

        logger.info(f"Starting memory optimization: {reason}")

        # Try AI optimization
        optimized_content = await self._optimize_memory_with_ai(ctx, full_content)

        if optimized_content:
            # Parse optimized content directly from string
            optimized_frontmatter, optimized_body = parse_frontmatter(optimized_content)

            # Update metadata in the optimized frontmatter
            entry_count = self._count_memory_entries(optimized_body)
            optimized_frontmatter.update(
                {
                    "lastOptimized": datetime.datetime.now(datetime.timezone.utc).isoformat(),
                    "entryCount": entry_count,
                    "optimizationVersion": frontmatter.get("optimizationVersion", 0) + 1,
                }
            )

            # Preserve user preferences from original frontmatter
            for key in ["autoOptimize", "sizeThreshold", "entryThreshold", "timeThreshold"]:
                if key in frontmatter:
                    optimized_frontmatter[key] = frontmatter[key]
                elif key not in optimized_frontmatter:
                    # Set sensible defaults for new files
                    defaults = {"autoOptimize": True, "sizeThreshold": 50000, "entryThreshold": 20, "timeThreshold": 7}
                    optimized_frontmatter[key] = defaults[key]

            # Write optimized content
            success = write_frontmatter_file(file_path, optimized_frontmatter, optimized_body, create_backup=True)

            # Determine if backup was actually created (skipped for git repos)
            backup_created = False if _is_in_git_repository(file_path) else success

            if success:
                logger.info("Memory optimization completed successfully")
                return {
                    "status": "optimized",
                    "reason": reason,
                    "method": "ai",
                    "entries_before": metadata.get("entryCount", 0),
                    "entries_after": entry_count,
                    "backup_created": backup_created,
                }
            else:
                return {"status": "error", "reason": "Failed to write optimized content"}
        else:
            # AI optimization failed, just update metadata
            logger.info("AI optimization unavailable, updating metadata only")
            success = self._update_metadata(file_path, content)

            # Determine if backup was actually created (skipped for git repos)
            backup_created = False if _is_in_git_repository(file_path) else success

            return {
                "status": "metadata_updated",
                "reason": reason,
                "method": "metadata_only",
                "ai_available": False,
                "backup_created": backup_created,
            }
    except Exception as e:
        logger.error(f"Memory optimization failed: {e}")
        return {"status": "error", "reason": str(e)}
```
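
After a successful run, the optimizer rewrites the file's YAML frontmatter with fresh metadata while preserving any user-set thresholds. A header might look like this (values are illustrative; the field names match the code above):

```yaml
---
lastOptimized: 2025-06-01T12:00:00+00:00
entryCount: 21
optimizationVersion: 3
autoOptimize: true
sizeThreshold: 50000
entryThreshold: 20
timeThreshold: 7
---
```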
  • Helper function that uses AI (ctx.sample) to optimize the memory content by reorganizing into sections, removing duplicates, while preserving all info and timestamps. Includes validation and error handling.
```python
async def _optimize_memory_with_ai(self, ctx: Context, content: str) -> Optional[str]:
    """Safely optimize memory content using AI sampling with comprehensive error handling."""
    try:
        response = await ctx.sample(
            f"""Please optimize this AI memory file by:

1. **Preserve ALL information** - Do not delete any memories or important details
2. **Remove duplicates** - Consolidate identical or very similar entries
3. **Organize by sections** - Group related memories under clear headings:
   - ## Personal Context (name, location, role, etc.)
   - ## Professional Context (team, goals, projects, etc.)
   - ## Technical Preferences (coding styles, tools, workflows)
   - ## Communication Preferences (style, feedback preferences)
   - ## Universal Laws (strict rules that must always be followed)
   - ## Policies (guidelines and standards)
   - ## Suggestions/Hints (recommendations and tips)
   - ## Memories/Facts (chronological events and information)
4. **Maintain timestamps** - Keep all original timestamps for traceability
5. **Improve formatting** - Use consistent markdown formatting
6. **Preserve frontmatter structure** - Keep the YAML header intact

Return ONLY the optimized content (including frontmatter), nothing else:

{content}""",
            temperature=0.1,  # Very low for consistency
            max_tokens=4000,
            model_preferences=["gpt-4", "claude-3-sonnet"],  # Prefer more reliable models
        )

        if response and hasattr(response, "text"):
            text_attr = getattr(response, "text", None)
            optimized_content = str(text_attr).strip() if text_attr else None

            # Basic validation - ensure we still have a memories section
            if optimized_content and ("## Memories" in optimized_content or "# Personal" in optimized_content):
                return optimized_content
            else:
                logger.warning("AI optimization removed essential sections, reverting to original")
                return None
        else:
            logger.warning(f"AI optimization returned unexpected type or no text: {type(response)}")
            return None
    except Exception as e:
        logger.info(f"AI optimization failed: {e}")
        return None
```
  • Helper function that determines if optimization is needed based on configurable thresholds for file size, new entries, time since last optimization, and auto-optimize setting.
```python
def _should_optimize_memory(self, file_path: Path, metadata: Dict[str, Any]) -> Tuple[bool, str]:
    """
    Determine if memory file should be optimized.

    Returns (should_optimize, reason)
    """
    # Check if auto-optimization is disabled
    if not metadata.get("autoOptimize", True):
        return False, "Auto-optimization disabled"

    # File size check
    file_size = file_path.stat().st_size
    size_threshold = metadata.get("sizeThreshold", 50000)
    if file_size > size_threshold:
        return True, f"File size ({file_size} bytes) exceeds threshold ({size_threshold} bytes)"

    # Entry count check
    try:
        _, content = parse_frontmatter_file(file_path)
        current_entries = self._count_memory_entries(content)
        last_count = metadata.get("entryCount", 0)
        entry_threshold = metadata.get("entryThreshold", 20)
        new_entries = current_entries - last_count
        if new_entries >= entry_threshold:
            return True, f"New entries ({new_entries}) exceed threshold ({entry_threshold})"
    except Exception as e:
        logger.warning(f"Could not count entries: {e}")

    # Time-based check
    last_optimized = metadata.get("lastOptimized")
    if last_optimized:
        try:
            last_opt_time = datetime.datetime.fromisoformat(last_optimized.replace("Z", "+00:00"))
            time_threshold = metadata.get("timeThreshold", 7)  # days
            days_since = (datetime.datetime.now(datetime.timezone.utc) - last_opt_time).days
            if days_since >= time_threshold:
                return True, f"Days since last optimization ({days_since}) exceed threshold ({time_threshold})"
        except Exception as e:
            logger.warning(f"Could not parse last optimization time: {e}")
            # If we can't parse the time, consider it old enough to optimize
            return True, "Could not determine last optimization time"
    else:
        # No last optimization time means this is an existing file without metadata
        return True, "No previous optimization recorded (legacy file)"

    return False, "No optimization criteria met"
```
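
To make the decision rules concrete, here is a condensed, dependency-free sketch of the same checks. The function name and sample values are hypothetical; the thresholds and defaults mirror the method above:

```python
import datetime

def should_optimize(file_size: int, current_entries: int, metadata: dict) -> tuple[bool, str]:
    # Same precedence as _should_optimize_memory: opt-out flag, size, entries, then age.
    if not metadata.get("autoOptimize", True):
        return False, "auto-optimization disabled"
    if file_size > metadata.get("sizeThreshold", 50000):
        return True, "size threshold exceeded"
    if current_entries - metadata.get("entryCount", 0) >= metadata.get("entryThreshold", 20):
        return True, "entry threshold exceeded"
    last = metadata.get("lastOptimized")
    if not last:
        return True, "no previous optimization recorded"
    age = datetime.datetime.now(datetime.timezone.utc) - datetime.datetime.fromisoformat(last.replace("Z", "+00:00"))
    if age.days >= metadata.get("timeThreshold", 7):
        return True, "time threshold exceeded"
    return False, "no criteria met"

# 30 new entries since the last run (10 -> 40) trips the default entryThreshold of 20.
print(should_optimize(
    file_size=12000,
    current_entries=40,
    metadata={"entryCount": 10, "lastOptimized": "2025-01-01T00:00:00Z"},
))  # -> (True, 'entry threshold exceeded')
```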
