get_cpu_info_tool
Retrieve CPU usage and system information. Adjust measurement interval and optionally include per-CPU core breakdown.
Instructions
Retrieve CPU usage and information.
Args: interval: Measurement interval in seconds (default: 1.0) per_cpu: Include per-CPU core breakdown (default: false)
Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| interval | No | Measurement interval in seconds | 1.0 |
| per_cpu | No | Include per-CPU core breakdown | false |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | CPU usage and system information | |
Implementation Reference
- src/system_info_mcp/server.py:35-43 (registration)Registration of get_cpu_info_tool via @app.tool() decorator on FastMCP. This is the MCP tool handler that delegates to get_cpu_info in tools.py.
@app.tool()
def get_cpu_info_tool(interval: float = 1.0, per_cpu: bool = False) -> Dict[str, Any]:
    """Retrieve CPU usage and information.

    Args:
        interval: Measurement interval in seconds (default: 1.0)
        per_cpu: Include per-CPU core breakdown (default: false)
    """
    # Thin MCP-facing handler: all measurement logic lives in
    # tools.get_cpu_info; this wrapper only forwards the two parameters.
    # NOTE: FastMCP derives the tool's client-visible description from this
    # docstring, so its wording is part of the tool's public contract.
    return get_cpu_info(interval=interval, per_cpu=per_cpu)

- src/system_info_mcp/tools.py:22-78 (handler)Actual implementation (get_cpu_info) of the CPU info logic. Uses psutil to gather CPU usage percentage, per-CPU breakdown, logical/physical CPU counts, CPU frequency, and load average. Cached with ttl=2 seconds using @cache_result.
@cache_result("cpu_info", ttl=2) def get_cpu_info(interval: float = 1.0, per_cpu: bool = False) -> Dict[str, Any]: """Retrieve CPU usage and information.""" try: # Validate parameters if interval <= 0: raise ValueError("Interval must be a positive number") # Get CPU percentage (this call blocks for the interval) cpu_percent = psutil.cpu_percent(interval=interval) # Get per-CPU percentages if requested per_cpu_percent = None if per_cpu: per_cpu_percent = psutil.cpu_percent(interval=0, percpu=True) # Get CPU counts cpu_count_logical = psutil.cpu_count(logical=True) or 0 cpu_count_physical = psutil.cpu_count(logical=False) or 0 # Get CPU frequency try: cpu_freq = psutil.cpu_freq() cpu_freq_current = safe_float(cpu_freq.current if cpu_freq else 0) cpu_freq_max = safe_float(cpu_freq.max if cpu_freq else 0) except (AttributeError, OSError): cpu_freq_current = 0.0 cpu_freq_max = 0.0 # Get load average (Unix-like systems only) try: if hasattr(os, 'getloadavg'): load_avg = os.getloadavg() load_average = [round(avg, 2) for avg in load_avg] else: load_average = [0.0, 0.0, 0.0] except (AttributeError, OSError): load_average = [0.0, 0.0, 0.0] result = { "cpu_percent": round(cpu_percent, 1), "cpu_count_logical": cpu_count_logical, "cpu_count_physical": cpu_count_physical, "cpu_freq_current": cpu_freq_current, "cpu_freq_max": cpu_freq_max, "load_average": load_average, } if per_cpu_percent is not None: result["per_cpu_percent"] = [round(p, 1) for p in per_cpu_percent] return result except Exception as e: logger.error(f"Error getting CPU info: {e}") raise - src/system_info_mcp/utils.py:55-107 (helper)cache_result decorator utility used by get_cpu_info to cache results (ttl=2s for cpu_info). Supports both sync and async functions with TTL-based expiry.
def cache_result(cache_key: str, ttl: Optional[int] = None) -> Any: """Decorator to cache function results with TTL.""" if ttl is None: ttl = config.cache_ttl def decorator(func: Callable[..., T]) -> Callable[..., T]: @wraps(func) async def async_wrapper(*args: Any, **kwargs: Any) -> T: current_time = time.time() # Check if we have cached result if cache_key in _cache: cache_entry = _cache[cache_key] if current_time - cache_entry["timestamp"] < ttl: logger.debug(f"Cache hit for {cache_key}") return cache_entry["data"] # Get fresh data logger.debug(f"Cache miss for {cache_key}, fetching fresh data") if asyncio.iscoroutinefunction(func): result = await func(*args, **kwargs) else: result = func(*args, **kwargs) # Cache the result _cache[cache_key] = {"data": result, "timestamp": current_time} return result @wraps(func) def sync_wrapper(*args: Any, **kwargs: Any) -> T: current_time = time.time() # Check if we have cached result if cache_key in _cache: cache_entry = _cache[cache_key] if current_time - cache_entry["timestamp"] < ttl: logger.debug(f"Cache hit for {cache_key}") return cache_entry["data"] # Get fresh data logger.debug(f"Cache miss for {cache_key}, fetching fresh data") result = func(*args, **kwargs) # Cache the result _cache[cache_key] = {"data": result, "timestamp": current_time} return result # Return appropriate wrapper based on whether function is async return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper return decorator - src/system_info_mcp/tools.py:8-18 (helper)Imports from utils used by get_cpu_info: safe_float for safe numeric conversion, logger for error reporting.
from .utils import ( bytes_to_gb, bytes_to_mb, format_uptime, timestamp_to_iso, cache_result, safe_float, safe_int, filter_sensitive_cmdline, logger, ) - src/system_info_mcp/server.py:8-17 (helper)Import statement that imports get_cpu_info from .tools module, linking the server registration to the actual handler.
from .config import config from .tools import ( get_cpu_info, get_memory_info, get_disk_info, get_network_info, get_process_list, get_system_uptime, get_temperature_info, )