get_platform_status
Check Docker/Podman and GPU availability to verify platform readiness for vLLM operations across Linux, macOS, and Windows systems.
Instructions
Get platform information including Docker and GPU availability
Input Schema
Views: Table · JSON Schema
| Name | Required | Description | Default |
|---|---|---|---|
| _No arguments_ | | | |
Implementation Reference
- The main handler function for get_platform_status tool. Retrieves platform information using get_platform_info() and formats it into a TextContent response with platform type, container runtime status, GPU availability, cache path, and notes.
async def get_platform_status(arguments: dict[str, Any]) -> list[TextContent]: """ Get detailed platform and container runtime status information. Returns: List of TextContent with platform information. """ platform_info = await get_platform_info() # Platform emoji platform_emoji = { Platform.LINUX: "π§", Platform.MACOS_ARM: "π", Platform.MACOS_INTEL: "π", Platform.WINDOWS: "πͺ", Platform.UNKNOWN: "β", } emoji = platform_emoji.get(platform_info.platform, "β") # Runtime status if platform_info.container_runtime == ContainerRuntime.NONE: runtime_status = "β Not installed" runtime_name = "None" elif platform_info.runtime_running: runtime_status = "β Running" runtime_name = platform_info.container_runtime.value.capitalize() else: runtime_status = "β οΈ Installed but not running" runtime_name = platform_info.container_runtime.value.capitalize() gpu_status = "β Available" if platform_info.has_nvidia_gpu else "β Not available" notes_text = "\n".join(f" - {note}" for note in platform_info.notes) if platform_info.notes else " - None" return [TextContent( type="text", text=f"## Platform Status {emoji}\n\n" f"**Platform:** {platform_info.platform.value}\n" f"**Container Runtime:** {runtime_name} ({runtime_status})\n" f"**NVIDIA GPU:** {gpu_status}\n" f"**HF Cache Path:** `{platform_info.cache_path}`\n" f"**GPU Flags:** `{' '.join(platform_info.gpu_flags) or 'None (CPU mode)'}`\n" f"\n**Notes:**\n{notes_text}" )] - src/vllm_mcp_server/server.py:284-291 (registration)MCP tool registration for get_platform_status. Defines the tool name, description, and input schema (empty object type since no parameters are required).
# MCP tool registration entry: tool name, human-readable description, and an
# empty-object input schema (the tool accepts no arguments).
Tool(
    name="get_platform_status",
    description="Get platform information including Docker and GPU availability",
    inputSchema={
        "type": "object",
        "properties": {},
    },
),

- Type definitions used by get_platform_status: Platform enum (LINUX, MACOS_ARM, MACOS_INTEL, WINDOWS, UNKNOWN), ContainerRuntime enum (PODMAN, DOCKER, NONE), and PlatformInfo dataclass containing platform detection results.
class Platform(Enum):
    """Supported platforms."""

    LINUX = "linux"
    MACOS_ARM = "macos_arm"
    MACOS_INTEL = "macos_intel"
    WINDOWS = "windows"
    UNKNOWN = "unknown"


class ContainerRuntime(Enum):
    """Supported container runtimes."""

    PODMAN = "podman"
    DOCKER = "docker"
    NONE = "none"


@dataclass
class PlatformInfo:
    """Platform-specific information."""

    platform: Platform                   # detected OS/architecture
    container_runtime: ContainerRuntime  # PODMAN, DOCKER, or NONE
    has_nvidia_gpu: bool                 # True when an NVIDIA GPU was detected
    runtime_available: bool              # runtime is installed
    runtime_running: bool                # runtime is installed AND responding
    cache_path: str                      # HF cache path shown to the user
    gpu_flags: list[str]                 # container CLI flags for GPU access
    notes: list[str]                     # human-readable platform notes

- Helper function get_platform_info() called by get_platform_status handler. Detects the current platform, container runtime availability, NVIDIA GPU presence, and compiles platform-specific configuration including GPU flags and notes.
async def get_platform_info() -> PlatformInfo: """Get comprehensive platform information.""" plat = _detect_platform() runtime, runtime_available, runtime_running, _ = await _detect_container_runtime() has_nvidia = await _check_nvidia_gpu(runtime) if runtime_running else False notes: list[str] = [] gpu_flags: list[str] = [] # Runtime info if runtime == ContainerRuntime.PODMAN: notes.append("Using Podman as container runtime") elif runtime == ContainerRuntime.DOCKER: notes.append("Using Docker as container runtime") else: notes.append("No container runtime available") if plat == Platform.LINUX: if has_nvidia: if runtime == ContainerRuntime.PODMAN: # Podman uses --device for GPU access with CDI gpu_flags = ["--device", "nvidia.com/gpu=all"] else: gpu_flags = ["--gpus", "all"] notes.append("NVIDIA GPU detected - full GPU acceleration available") else: notes.append("No NVIDIA GPU detected - running in CPU mode") elif plat == Platform.MACOS_ARM: notes.append("Apple Silicon detected - containers run in CPU mode") notes.append("For GPU acceleration, consider running vLLM natively with Metal") elif plat == Platform.MACOS_INTEL: notes.append("Intel Mac detected - containers run in CPU mode") elif plat == Platform.WINDOWS: if has_nvidia: gpu_flags = ["--gpus", "all"] notes.append("NVIDIA GPU detected via WSL2 - GPU acceleration available") else: notes.append("No NVIDIA GPU detected - running in CPU mode") notes.append("Ensure WSL2 and NVIDIA Container Toolkit are installed for GPU support") return PlatformInfo( platform=plat, container_runtime=runtime, has_nvidia_gpu=has_nvidia, runtime_available=runtime_available, runtime_running=runtime_running, cache_path=_get_cache_path(plat), gpu_flags=gpu_flags, notes=notes, ) - src/vllm_mcp_server/server.py:363-364 (registration)Routing logic in handle_tool_request that dispatches to the get_platform_status handler when the tool name matches.
# Dispatch branch (fragment of handle_tool_request's if/elif chain): route
# the "get_platform_status" tool call to its handler.
elif name == "get_platform_status":
    return await get_platform_status(arguments)