get_memory_info_tool
Retrieve memory usage statistics to monitor system RAM utilization and free memory.
Instructions
Retrieve memory usage statistics.
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| *No arguments* | — | — | — |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | Memory usage statistics (virtual and swap memory) | — |
Implementation Reference
- src/system_info_mcp/server.py:46-49 (registration) — Registration of the 'get_memory_info_tool' MCP tool via the @app.tool() decorator in the FastMCP server. It is a thin wrapper that delegates to get_memory_info().
@app.tool()
def get_memory_info_tool() -> Dict[str, Any]:
    """Retrieve memory usage statistics.

    Thin MCP-facing wrapper: all real work (psutil calls and the 1-second
    result cache) lives in get_memory_info() in tools.py.
    """
    return get_memory_info()

- src/system_info_mcp/tools.py:80-111 (handler) — Actual implementation of memory info retrieval. Uses psutil.virtual_memory() and psutil.swap_memory() to collect memory statistics, returning dicts with virtual and swap memory details. Cached with a 1-second TTL.
@cache_result("memory_info", ttl=1) def get_memory_info() -> Dict[str, Any]: """Retrieve memory usage statistics.""" try: # Get virtual memory info virtual_mem = psutil.virtual_memory() # Get swap memory info swap_mem = psutil.swap_memory() return { "virtual_memory": { "total": virtual_mem.total, "available": virtual_mem.available, "used": virtual_mem.used, "percent": round(virtual_mem.percent, 1), "total_gb": bytes_to_gb(virtual_mem.total), "available_gb": bytes_to_gb(virtual_mem.available), "used_gb": bytes_to_gb(virtual_mem.used), }, "swap_memory": { "total": swap_mem.total, "used": swap_mem.used, "free": swap_mem.free, "percent": round(swap_mem.percent, 1), "total_gb": bytes_to_gb(swap_mem.total), }, } except Exception as e: logger.error(f"Error getting memory info: {e}") raise - src/system_info_mcp/utils.py:23-25 (helper)Helper function used by get_memory_info to convert byte values to gigabytes for human-readable output.
def bytes_to_gb(bytes_value: int) -> float:
    """Convert a byte count to gigabytes, rounded to one decimal place."""
    bytes_per_gb = 1024 ** 3  # binary gigabyte (GiB)
    return round(bytes_value / bytes_per_gb, 1)

# - src/system_info_mcp/utils.py:55-107 (helper) — Decorator used to cache
#   get_memory_info results with a 1-second TTL, reducing redundant psutil calls.
def cache_result(cache_key: str, ttl: Optional[int] = None) -> Any:
    """Decorator to cache function results with TTL.

    Args:
        cache_key: Key under which the result is stored in the module-level
            _cache dict. NOTE(review): the key is fixed per decoration, so
            all argument combinations of the wrapped function share one
            cache entry — confirm decorated functions take no arguments
            that affect the result.
        ttl: Time-to-live in seconds. When None, falls back to
            config.cache_ttl — resolved once, at decoration time, so later
            config changes do not affect already-decorated functions.

    Returns:
        A wrapper (async or sync, matching the wrapped function) that
        serves the cached value while it is fresher than `ttl` seconds.
    """
    if ttl is None:
        ttl = config.cache_ttl  # default bound at decoration time, not per call

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        @wraps(func)
        async def async_wrapper(*args: Any, **kwargs: Any) -> T:
            current_time = time.time()

            # Check if we have cached result
            # NOTE(review): _cache is a plain dict with no lock and no size
            # bound — presumably fine under a single-threaded event loop;
            # confirm before using from multiple threads.
            if cache_key in _cache:
                cache_entry = _cache[cache_key]
                if current_time - cache_entry["timestamp"] < ttl:
                    logger.debug(f"Cache hit for {cache_key}")
                    return cache_entry["data"]

            # Get fresh data
            logger.debug(f"Cache miss for {cache_key}, fetching fresh data")
            # Re-checked here so the async wrapper can also delegate to a
            # sync function without awaiting a non-coroutine.
            if asyncio.iscoroutinefunction(func):
                result = await func(*args, **kwargs)
            else:
                result = func(*args, **kwargs)

            # Cache the result
            _cache[cache_key] = {"data": result, "timestamp": current_time}
            return result

        @wraps(func)
        def sync_wrapper(*args: Any, **kwargs: Any) -> T:
            current_time = time.time()

            # Check if we have cached result
            if cache_key in _cache:
                cache_entry = _cache[cache_key]
                if current_time - cache_entry["timestamp"] < ttl:
                    logger.debug(f"Cache hit for {cache_key}")
                    return cache_entry["data"]

            # Get fresh data
            logger.debug(f"Cache miss for {cache_key}, fetching fresh data")
            result = func(*args, **kwargs)

            # Cache the result
            _cache[cache_key] = {"data": result, "timestamp": current_time}
            return result

        # Return appropriate wrapper based on whether function is async
        return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper

    return decorator