We provide all information about MCP servers via our MCP API. For example:
curl -X GET 'https://glama.ai/api/mcp/v1/servers/ptmorris05/scalene-mcp'
If you have feedback or need assistance with the MCP directory API, please join our Discord server.
{
"elapsed_time_seconds": 2.456,
"max_footprint_mb": 128.5,
"growth_rate": 15.3,
"samples": [],
"files": {
"leaky.py": {
"functions": [
{
"name": "leak_memory",
"lineno": 8,
"n_cpu_percent_python": 45.2,
"n_cpu_percent_c": 0.0,
"n_sys_percent": 12.3,
"n_gpu_percent": 0.0,
"n_peak_mb": 105.0,
"n_avg_mb": 52.5
}
],
"lines": [
{
"lineno": 8,
"line": "def leak_memory() -> None:",
"n_cpu_percent_python": 0.0,
"n_cpu_percent_c": 0.0,
"n_sys_percent": 0.0,
"n_gpu_percent": 0.0,
"n_peak_mb": 0.0,
"n_avg_mb": 0.0,
"n_malloc_mb": 0.0,
"n_mallocs": 0,
"n_usage_fraction": 0.0,
"memory_samples": [],
"cpu_samples_list": []
},
{
"lineno": 10,
"line": " for _ in range(100):",
"n_cpu_percent_python": 5.2,
"n_cpu_percent_c": 0.0,
"n_sys_percent": 0.0,
"n_gpu_percent": 0.0,
"n_peak_mb": 0.0,
"n_avg_mb": 0.0,
"n_malloc_mb": 0.0,
"n_mallocs": 0,
"n_usage_fraction": 0.0,
"memory_samples": [],
"cpu_samples_list": []
},
{
"lineno": 12,
"line": " leaked_memory.append(b\"x\" * (1024 * 1024))",
"n_cpu_percent_python": 25.3,
"n_cpu_percent_c": 0.0,
"n_sys_percent": 8.7,
"n_gpu_percent": 0.0,
"n_peak_mb": 105.0,
"n_avg_mb": 52.5,
"n_malloc_mb": 100.0,
"n_mallocs": 100,
"n_usage_fraction": 0.95,
"memory_samples": [
[
0.0,
0.0
],
[
0.5,
50.0
],
[
1.0,
100.0
],
[
1.5,
105.0
]
],
"cpu_samples_list": []
},
{
"lineno": 13,
"line": " time.sleep(0.01)",
"n_cpu_percent_python": 0.0,
"n_cpu_percent_c": 0.0,
"n_sys_percent": 3.6,
"n_gpu_percent": 0.0,
"n_peak_mb": 0.0,
"n_avg_mb": 0.0,
"n_malloc_mb": 0.0,
"n_mallocs": 0,
"n_usage_fraction": 0.0,
"memory_samples": [],
"cpu_samples_list": []
}
],
"leaks": {
"12": {
"likelihood": 0.92,
"velocity_mb_s": 42.5
}
}
}
}
}