
Stem MCP Server

by tolutronics
pyproject.toml (1.16 kB)
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "stem-mcp"
version = "0.1.0"
description = "MCP server for audio stem processing: generation, splitting, and loop creation"
readme = "README.md"
requires-python = ">=3.10"
authors = [
    {name = "Tolutronics", email = "toluxy100@gmail.com"}
]
dependencies = [
    "mcp>=1.0.0",
    "librosa>=0.10.1",
    "soundfile>=0.12.1",
    "numpy>=1.24.0,<3.0.0",
    "scipy>=1.10.0",
    "torch>=2.0.0",
    "torchaudio>=2.0.0",
    "demucs>=4.0.1",
    "pydub>=0.25.1",
    "click>=8.1.0",
    "typing-extensions>=4.5.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0",
    "pytest-asyncio>=0.21.0",
    "black>=23.0.0",
    "isort>=5.12.0",
    "mypy>=1.5.0",
    "pre-commit>=3.0.0",
]

[project.scripts]
stem-mcp = "stem_mcp.server:main"

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.package-dir]
"" = "src"

[tool.black]
line-length = 88
target-version = ['py310']

[tool.isort]
profile = "black"
line_length = 88

[tool.mypy]
python_version = "3.10"
strict = true
warn_return_any = true
warn_unused_configs = true
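The [project.scripts] entry means that installing the package (for example with pip install .) exposes a stem-mcp command that calls stem_mcp.server:main. The following is only a minimal sketch of what that module might look like, assuming a recent mcp Python SDK that ships FastMCP and the librosa dependency declared above; the tool name and body are illustrative and not taken from the project's actual code.

# Hypothetical sketch of src/stem_mcp/server.py -- illustrative only, not the project's real code.
from mcp.server.fastmcp import FastMCP

import librosa

# Server name shown to MCP clients; "stem-mcp" mirrors the package name above (assumption).
server = FastMCP("stem-mcp")

@server.tool()
def audio_duration(path: str) -> float:
    """Return the duration of an audio file in seconds (example tool, not from the source)."""
    return float(librosa.get_duration(path=path))

def main() -> None:
    # Entry point referenced by [project.scripts]: stem-mcp = "stem_mcp.server:main".
    # FastMCP.run() defaults to the stdio transport that MCP clients typically use to launch servers.
    server.run()

if __name__ == "__main__":
    main()

With a module along these lines, running stem-mcp after installation would start the server over stdio, which is how MCP clients usually spawn it.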

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/tolutronics/audio-processing-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.