pyproject.toml (1.32 kB)
```toml
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "douyin-mcp-server"
version = "1.1.0"
description = "MCP server for downloading Douyin videos and extracting text"
readme = "README.md"
license = {text = "MIT"}
authors = [
    {name = "yzfly", email = "yz.liu.me@gmail.com"}
]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
requires-python = ">=3.10"
dependencies = [
    "mcp>=1.9.0",
    "requests",
    "ffmpeg-python",
    "tqdm",
    "toml",  # used to read the [tool.douyin-mcp-server] configuration
]

[project.urls]
Homepage = "https://github.com/yzfly/douyin-mcp-server"
Repository = "https://github.com/yzfly/douyin-mcp-server"
Issues = "https://github.com/yzfly/douyin-mcp-server/issues"

[project.scripts]
douyin-mcp-server = "douyin_mcp_server.server:main_cli"

# Douyin MCP server configuration
[tool.douyin-mcp-server]
api_base_url = "https://api.siliconflow.cn/v1/audio/transcriptions"
model = "FunAudioLLM/SenseVoiceSmall"
```
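The `[project.scripts]` entry wires the `douyin-mcp-server` console command to `douyin_mcp_server.server:main_cli`. The following is a hypothetical sketch of what such an entry point could look like using the `FastMCP` helper from the declared `mcp` dependency; the tool name and its body are illustrative, not the project's actual code.

```python
# Hypothetical entry-point sketch for the [project.scripts] console command.
# FastMCP comes from the `mcp` dependency; the extract_text tool is a stub.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("douyin-mcp-server")

@mcp.tool()
def extract_text(video_url: str) -> str:
    """Placeholder tool: download a Douyin video and return its transcript."""
    return f"transcript for {video_url}"  # real logic would download + transcribe

def main_cli() -> None:
    # The console script calls this function; stdio is the standard MCP transport.
    mcp.run(transport="stdio")

if __name__ == "__main__":
    main_cli()
```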
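The `toml` dependency is annotated as being used to read configuration, presumably the `[tool.douyin-mcp-server]` table above. A minimal sketch of such a loader, assuming the table is read from a local `pyproject.toml` and merged over defaults (the `load_server_config` helper name is an assumption):

```python
# Sketch of reading the [tool.douyin-mcp-server] table with the `toml` package.
# The helper name, file location, and fallback behavior are assumptions.
from pathlib import Path

import toml

DEFAULTS = {
    "api_base_url": "https://api.siliconflow.cn/v1/audio/transcriptions",
    "model": "FunAudioLLM/SenseVoiceSmall",
}

def load_server_config(pyproject_path: str = "pyproject.toml") -> dict:
    """Return the [tool.douyin-mcp-server] table merged over defaults."""
    path = Path(pyproject_path)
    if not path.exists():
        return dict(DEFAULTS)
    with open(path, encoding="utf-8") as f:
        data = toml.load(f)
    table = data.get("tool", {}).get("douyin-mcp-server", {})
    return {**DEFAULTS, **table}

if __name__ == "__main__":
    print(load_server_config())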
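The configured `api_base_url` and `model` point at a speech-to-text endpoint, which is how the server can turn downloaded video audio into text. Below is a minimal sketch of such a call with `requests`, assuming the endpoint follows the common OpenAI-compatible `/audio/transcriptions` contract (multipart upload, bearer auth); the `DOUYIN_API_KEY` variable name and `transcribe_audio` helper are illustrative, not taken from the project source.

```python
# Sketch of a transcription request against the configured endpoint, assuming
# an OpenAI-compatible /audio/transcriptions API. Names below are assumptions.
import os

import requests

API_BASE_URL = "https://api.siliconflow.cn/v1/audio/transcriptions"
MODEL = "FunAudioLLM/SenseVoiceSmall"

def transcribe_audio(audio_path: str, api_key: str) -> str:
    """Upload an audio file and return the transcribed text."""
    with open(audio_path, "rb") as f:
        response = requests.post(
            API_BASE_URL,
            headers={"Authorization": f"Bearer {api_key}"},
            files={"file": f},
            data={"model": MODEL},
            timeout=120,
        )
    response.raise_for_status()
    return response.json().get("text", "")

if __name__ == "__main__":
    key = os.environ["DOUYIN_API_KEY"]  # assumed environment variable name
    print(transcribe_audio("audio.mp3", key))
```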
