discover_niche_trends

Identify emerging niche trends within a topic by analyzing video titles to find high-performing keyword combinations for content creation.

Instructions

Intelligently discovers high-performing niche areas within a topic. By analyzing a large batch of video titles, it automatically identifies keyword combinations with high VPH (views per hour), e.g. surfacing niches such as 'AI Kpop' or 'AI Indian stories' under the 'AI' topic.

Input Schema

| Name | Required | Description | Default |
|------|----------|-------------|---------|
| main_topic | Yes | Main topic keyword (e.g. 'AI', 'tutorial', 'funny') | - |
| hours_ago | No | Time range in hours | 24 |
| min_videos | No | Minimum number of videos a niche must contain | 3 |
| top_niches | No | Number of top niches to return | 10 |
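
For illustration, a tool call might pass arguments like the following (the values are hypothetical; the parameter names, defaults, and ranges come from the registered schema shown under Implementation Reference):

    arguments = {
        "main_topic": "AI",  # required
        "hours_ago": 48,     # optional, 1-720 (default 24)
        "min_videos": 3,     # optional, 2-20 (default 3)
        "top_niches": 5,     # optional, 3-30 (default 10)
    }

The call_tool() router shown below forwards these arguments to the discover_niche_trends() handler, filling in defaults for any omitted optional parameters.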

Implementation Reference

  • Main handler implementation of the discover_niche_trends tool. It analyzes video titles to extract keyword combinations (bigrams/trigrams), calculates the average VPH (Views Per Hour) for each niche, filters by minimum video count, ranks by average VPH, and returns a detailed Markdown report of the top-performing sub-niches within a main topic. A standalone toy sketch of this bigram/trigram aggregation appears after this reference list.
    async def discover_niche_trends(
        main_topic: str,
        hours_ago: int = 24,
        min_videos: int = 3,
        top_niches: int = 10
    ) -> list[TextContent]:
        """
        Implementation of tool 5: discover high-performing niche areas.
        
        Algorithm:
        1. Search for the main topic keyword and collect a large batch of videos.
        2. Extract keyword combinations (bigrams/trigrams) from the titles.
        3. Aggregate the video count and average VPH for each niche.
        4. Return the niches with the highest average VPH.
        """
        
        try:
            from collections import defaultdict
            from datetime import datetime, timezone
            
            client = YouTubeClient()
            analyzer = ViralAnalyzer()
            
            # Fetch a larger batch of videos for analysis (up to 50)
            videos = client.get_trending_shorts(
                keyword=main_topic,
                hours_ago=hours_ago,
                max_results=50
            )
            
            if not videos:
                return [TextContent(
                    type="text",
                    text=f"未找到 '{main_topic}' 相关的视频"
                )]
            
            # Analyze the videos (compute VPH)
            videos = analyzer.analyze_videos(videos)
            
            # Extract keyword combinations
            niche_data = defaultdict(lambda: {
                'videos': [],
                'total_vph': 0,
                'total_views': 0,
                'count': 0
            })
            
            for video in videos:
                # Extract keyword combinations from the title
                title_lower = video.title.lower()
                words = re.findall(r'\b\w+\b', title_lower)
                
                # Build bigrams (two-word combinations)
                for i in range(len(words) - 1):
                    bigram = f"{words[i]} {words[i+1]}"
                    # The combination must contain the main topic keyword
                    if main_topic.lower() in bigram:
                        niche_data[bigram]['videos'].append(video)
                        niche_data[bigram]['total_vph'] += video.vph
                        niche_data[bigram]['total_views'] += video.views
                        niche_data[bigram]['count'] += 1
                
                # Build trigrams (three-word combinations)
                for i in range(len(words) - 2):
                    trigram = f"{words[i]} {words[i+1]} {words[i+2]}"
                    if main_topic.lower() in trigram:
                        niche_data[trigram]['videos'].append(video)
                        niche_data[trigram]['total_vph'] += video.vph
                        niche_data[trigram]['total_views'] += video.views
                        niche_data[trigram]['count'] += 1
            
            # Filter: keep only niches with at least min_videos videos
            filtered_niches = {
                niche: data 
                for niche, data in niche_data.items() 
                if data['count'] >= min_videos
            }
            
            if not filtered_niches:
                return [TextContent(
                    type="text",
                    text=f"未找到符合条件的细分领域(至少 {min_videos} 个视频)\n\n建议:\n- 降低 min_videos 参数\n- 扩大时间范围\n- 尝试其他主题关键词"
                )]
            
            # Compute average VPH and rank
            niche_rankings = []
            for niche, data in filtered_niches.items():
                avg_vph = data['total_vph'] / data['count']
                avg_views = data['total_views'] / data['count']
                niche_rankings.append({
                    'niche': niche,
                    'avg_vph': avg_vph,
                    'avg_views': avg_views,
                    'video_count': data['count'],
                    'top_video': max(data['videos'], key=lambda v: v.vph)
                })
            
            # Sort by average VPH
            niche_rankings.sort(key=lambda x: x['avg_vph'], reverse=True)
            top_niches_list = niche_rankings[:top_niches]
            
            # Build the report
            report = f"# 🔍 Niche Trend Analysis for '{main_topic}'\n\n"
            report += f"**Analysis parameters**\n"
            report += f"- Topic: {main_topic}\n"
            report += f"- Time range: last {hours_ago} hours\n"
            report += f"- Videos analyzed: {len(videos)}\n"
            report += f"- Niches found: {len(filtered_niches)}\n"
            report += f"- Minimum video threshold: {min_videos}\n\n"
            
            report += "---\n\n"
            report += "## 📊 高 VPH 细分领域排行\n\n"
            
            for idx, niche_info in enumerate(top_niches_list, 1):
                report += f"### {idx}. **{niche_info['niche'].title()}**\n\n"
                report += f"- **平均 VPH**: {niche_info['avg_vph']:,.0f} 次/小时\n"
                report += f"- **平均播放量**: {niche_info['avg_views']:,.0f}\n"
                report += f"- **视频数量**: {niche_info['video_count']}\n"
                
                top_vid = niche_info['top_video']
                report += f"- **代表视频**: [{top_vid.title[:50]}...]({top_vid.url})\n"
                report += f"  - 播放量: {top_vid.views:,}\n"
                report += f"  - VPH: {top_vid.vph:,.0f}\n"
                report += f"  - 互动率: {top_vid.engagement_rate:.2f}%\n\n"
            
            # Append insights
            report += "---\n\n## 💡 Insights\n\n"
            
            if top_niches_list:
                top_niche = top_niches_list[0]
                report += f"1. **最强细分领域**: '{top_niche['niche'].title()}' (平均 VPH {top_niche['avg_vph']:,.0f})\n"
                report += f"2. **内容策略**: 可以围绕这些细分领域创作类似内容\n"
                report += f"3. **竞争程度**: 视频数量 {top_niche['video_count']} 个,属于"
                
                if top_niche['video_count'] < 5:
                    report += "**蓝海市场**(竞争少)\n"
                elif top_niche['video_count'] < 10:
                    report += "**成长市场**(适度竞争)\n"
                else:
                    report += "**红海市场**(竞争激烈)\n"
            
            return [TextContent(type="text", text=report)]
        
        except Exception as e:
            return [TextContent(
                type="text",
                text=f"❌ 发现细分领域失败: {str(e)}"
            )]
  • src/server.py:256-294 (registration)
    Tool registration in list_tools() function. Defines the discover_niche_trends tool with its name, description, inputSchema including main_topic (required), hours_ago, min_videos, and top_niches parameters with their types, defaults, and validation constraints.
    Tool(
        name="discover_niche_trends",
        description=(
            "智能发现某个主题下的细分爆款领域。"
            "通过分析大量视频标题,自动识别高 VPH 的关键词组合,"
            "例如在 'AI' 主题下发现 'AI Kpop'、'AI 印度故事' 等细分趋势。"
        ),
        inputSchema={
            "type": "object",
            "properties": {
                "main_topic": {
                    "type": "string",
                    "description": "主题关键词(如 'AI', 'tutorial', 'funny')"
                },
                "hours_ago": {
                    "type": "integer",
                    "description": "时间范围(小时),默认 24 小时",
                    "default": 24,
                    "minimum": 1,
                    "maximum": 720
                },
                "min_videos": {
                    "type": "integer",
                    "description": "细分领域最少视频数量,默认 3",
                    "default": 3,
                    "minimum": 2,
                    "maximum": 20
                },
                "top_niches": {
                    "type": "integer",
                    "description": "返回前 N 个细分领域,默认 10",
                    "default": 10,
                    "minimum": 3,
                    "maximum": 30
                }
            },
            "required": ["main_topic"]
        }
    )
  • src/server.py:349-355 (registration)
    Call routing in call_tool() function. Routes tool invocation with name 'discover_niche_trends' to the handler function, extracting arguments from the request and passing them to discover_niche_trends().
    elif name == "discover_niche_trends":
        return await discover_niche_trends(
            main_topic=arguments["main_topic"],
            hours_ago=arguments.get("hours_ago", 24),
            min_videos=arguments.get("min_videos", 3),
            top_niches=arguments.get("top_niches", 10)
        )
  • VideoData schema using Pydantic BaseModel. Defines the data structure for YouTube videos with fields including video_id, title, channel_name, views, likes, comments, published_at, and engagement_rate, and includes the helper methods to_markdown_row() and markdown_header() used for report generation. A small usage sketch of these helpers appears after this reference list.
    class VideoData(BaseModel):
        """YouTube Shorts 视频数据模型"""
        
        video_id: str = Field(..., description="Video ID")
        title: str = Field(..., description="Video title")
        channel_name: str = Field(..., description="Channel name")
        channel_id: str = Field(..., description="Channel ID")
        channel_subscribers: Optional[int] = Field(None, description="Channel subscriber count")
        
        views: int = Field(..., description="View count")
        likes: int = Field(default=0, description="Like count")
        comments: int = Field(default=0, description="Comment count")
        
        published_at: datetime = Field(..., description="Publish time")
        duration: str = Field(..., description="Video duration")
        
        url: str = Field(..., description="Video URL")
        thumbnail_url: Optional[str] = Field(None, description="Thumbnail URL")
        description: Optional[str] = Field(None, description="Video description")
        
        engagement_rate: float = Field(default=0.0, description="Engagement rate (%)")
        
        class Config:
            """Pydantic 配置"""
            json_encoders = {
                datetime: lambda v: v.isoformat()
            }
        
        def to_markdown_row(self) -> str:
            """转换为 Markdown 表格行"""
            published_time = self.published_at.strftime('%Y-%m-%d %H:%M')
            return (
                f"| [{self.title[:40]}...]({self.url}) "
                f"| {self.channel_name[:20]} "
                f"| {self.views:,} "
                f"| {self.likes:,} "
                f"| {self.comments:,} "
                f"| {self.engagement_rate:.2f}% "
                f"| {published_time} |"
            )
        
        @staticmethod
        def markdown_header() -> str:
            """Markdown 表格头"""
            return (
                "| 标题 | 频道 | 播放量 | 点赞数 | 评论数 | 互动率 | 发布时间 |\n"
                "|------|------|--------|--------|--------|--------|----------|"
            )
  • ViralAnalyzer.analyze_videos() helper method used by discover_niche_trends to compute engagement rates for all videos. This is called to prepare video metrics before niche analysis.
    @classmethod
    def analyze_videos(cls, videos: List[VideoData]) -> List[VideoData]:
        """
        Batch-analyze a list of videos.
        
        Args:
            videos: list of videos
        
        Returns:
            the analyzed list of videos
        """
        return [cls.analyze_video(video) for video in videos]
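
To make the niche-aggregation logic in the main handler easier to follow, here is a minimal, self-contained sketch of the same idea. It uses plain (title, VPH) tuples instead of the server's VideoData objects and YouTubeClient; only the bigram/trigram extraction and average-VPH ranking mirror the handler above.

    import re
    from collections import defaultdict

    # Toy stand-ins for analyzed videos: (title, views-per-hour) pairs.
    samples = [
        ("AI Kpop dance challenge", 1200.0),
        ("AI Kpop idol cover", 900.0),
        ("AI Kpop concert highlights", 1500.0),
        ("AI tutorial for beginners", 300.0),
        ("funny cat video", 50.0),
    ]

    main_topic = "ai"
    niches = defaultdict(lambda: {"vph": 0.0, "count": 0})

    for title, vph in samples:
        words = re.findall(r"\b\w+\b", title.lower())
        # Bigrams and trigrams, kept only if they contain the main topic keyword.
        grams = [
            " ".join(words[i:i + n])
            for n in (2, 3)
            for i in range(len(words) - n + 1)
        ]
        for gram in grams:
            if main_topic in gram:
                niches[gram]["vph"] += vph
                niches[gram]["count"] += 1

    # Keep niches with at least 2 videos and rank by average VPH, highest first.
    ranked = sorted(
        (
            (gram, data["vph"] / data["count"], data["count"])
            for gram, data in niches.items()
            if data["count"] >= 2
        ),
        key=lambda item: item[1],
        reverse=True,
    )
    for gram, avg_vph, count in ranked:
        print(f"{gram}: average VPH {avg_vph:,.0f} across {count} videos")
        # -> "ai kpop: average VPH 1,200 across 3 videos"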
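
Similarly, a quick illustration of the VideoData helpers: the snippet below builds one instance with made-up values and renders a two-line Markdown table. The import path is a placeholder, since the module that defines VideoData is not named in this reference.

    from datetime import datetime, timezone

    from models import VideoData  # placeholder import path

    video = VideoData(
        video_id="abc123",
        title="AI Kpop dance challenge goes viral",
        channel_name="Example Channel",
        channel_id="UC_example",
        views=120000,
        likes=8000,
        comments=450,
        published_at=datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc),
        duration="PT45S",
        url="https://www.youtube.com/shorts/abc123",
        engagement_rate=7.04,  # illustrative value
    )

    print(VideoData.markdown_header())
    print(video.to_markdown_row())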
