Skip to main content
Glama
Talhelf
by Talhelf

analyze_category_trends

Analyze Google search trends for adult content categories to identify popularity patterns and growth over time across different regions.

Instructions

Analyze Google search trends for adult content categories.

Args:
    categories: List of category keywords (max 5)
               Example: ["milf", "teen", "amateur", "lesbian", "hentai"]
    timeframe: Time period (default: past 12 months)
    region: Region code (default: US)

Returns:
    Trend analysis showing category popularity and growth.

Input Schema

Table / JSON Schema

Name        Required   Description   Default
categories  Yes        —             —
timeframe   No         —             today 12-m
region      No         —             US

Implementation Reference

  • The analyze_category_trends tool handler decorated with @mcp.tool(). Validates input (max 5 categories) and delegates to search_trends helper to perform the actual Google Trends analysis.
    @mcp.tool()
    async def analyze_category_trends(
        categories: list[str],
        timeframe: str = "today 12-m",
        region: str = "US"
    ) -> str:
        """
        Analyze Google search trends for adult content categories.

        Args:
            categories: Category keywords to compare (at most 5).
                        Example: ["milf", "teen", "amateur", "lesbian", "hentai"]
            timeframe: Google Trends time period (default: past 12 months).
            region: Region code (default: US).

        Returns:
            Trend analysis showing category popularity and growth.
        """
        # Google Trends only compares up to 5 terms per request.
        if len(categories) > 5:
            return "⚠️  Maximum 5 categories can be compared at once."

        # All fetching/formatting lives in the generic keyword helper.
        return await search_trends(categories, timeframe, region)
  • The search_trends helper function that orchestrates the Google Trends data fetching. It calls get_trends_data to fetch raw data and formats it using format_interest_over_time, format_regional_interest, and format_related_queries.
    async def search_trends(
        keywords: list[str],
        timeframe: str = "today 12-m",
        region: str = "US"
    ) -> str:
        """
        Search Google Trends for any keywords (performers, platforms, categories, etc.).

        Args:
            keywords: List of search terms to analyze (max 5). Examples:
                      ["Lana Rhoades", "Riley Reid"]
                      ["pornhub", "onlyfans"]
                      ["milf", "teen", "amateur"]
            timeframe: Time period. Options:
                       'today 12-m' (past year, default)
                       'today 3-m' (past 3 months)
                       'today 5-y' (past 5 years)
                       '2020-01-01 2024-12-31' (custom date range)
                       Available back to 2004
            region: Geographic region code:
                    'US' (USA, default)
                    'GB' (UK)
                    '' (Worldwide)
                    Any ISO country code

        Returns:
            Complete Google Trends analysis with interest over time, regional data,
            and related queries — or a single error/warning message on failure.
        """

        if not pytrends:
            return "❌ Google Trends API is not available. Please check configuration."

        if len(keywords) > 5:
            return "⚠️  Maximum 5 keywords allowed per query. Please reduce your list."

        if len(keywords) == 0:
            return "⚠️  Please provide at least one keyword to search."

        # Fetch data
        data = get_trends_data(keywords, timeframe, region)

        # Report a fetch failure exactly once. Previously the error dict was fed
        # to every formatter, so the same error text appeared twice and the
        # "📝 Notes" footer was appended to an error message.
        if "error" in data:
            return f"❌ Error: {data['error']}"

        # Format and return results
        result = [
            format_interest_over_time(data),
            "",
            format_regional_interest(data, top_n=10),
            format_related_queries(data),
            "",
            "📝 Notes:",
            "- Values are on a 0-100 scale where 100 = peak popularity for the time period",
            "- Data represents search interest, not absolute search volumes",
            f"- Data fetched: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        ]

        return "\n".join(result)
  • The get_trends_data function that fetches data from the Google Trends API using pytrends. Implements caching, rate limiting, and error handling for trend data retrieval.
    def get_trends_data(keywords: list[str], timeframe: str = 'today 12-m', geo: str = 'US') -> dict:
        """
        Fetch Google Trends data for given keywords.

        Results are cached in-process for one hour per (keywords, timeframe, geo)
        combination; only successful fetches are cached.

        Args:
            keywords: List of search terms to compare (max 5)
            timeframe: Time period (e.g., 'today 12-m', 'today 5-y', '2020-01-01 2024-12-31')
            geo: Geographic region (e.g., 'US', 'GB', '' for worldwide)

        Returns:
            Dictionary with trends data, or {"error": ...} on failure.
        """
        if not pytrends:
            return {"error": "Google Trends API not available"}

        # Check cache. NOTE: use total_seconds(), not .seconds — .seconds wraps
        # around every 24 hours, so a day-old entry would look "fresh" again.
        cache_key = f"{','.join(keywords)}_{timeframe}_{geo}"
        if cache_key in TRENDS_CACHE:
            cached = TRENDS_CACHE[cache_key]
            age = (datetime.now() - datetime.fromisoformat(cached['fetched_at'])).total_seconds()
            if age < 3600:  # Cache for 1 hour
                print(f"Using cached data (age: {age:.0f}s)", file=sys.stderr)
                return cached

        try:
            print(f"Fetching Google Trends: {keywords}, {timeframe}, {geo}", file=sys.stderr)

            # Build payload (cat=0 = all categories, gprop='' = web search)
            pytrends.build_payload(keywords, cat=0, timeframe=timeframe, geo=geo, gprop='')

            # Get interest over time
            interest_over_time_df = pytrends.interest_over_time()

            # Get interest by region; regional data is optional, so degrade
            # gracefully to an empty frame instead of failing the whole fetch.
            try:
                interest_by_region_df = pytrends.interest_by_region(resolution='REGION', inc_low_vol=True, inc_geo_code=False)
            except Exception as e:
                print(f"Could not fetch regional data: {e}", file=sys.stderr)
                interest_by_region_df = pd.DataFrame()

            # Get related queries (also optional)
            try:
                related_queries = pytrends.related_queries()
            except Exception as e:
                print(f"Could not fetch related queries: {e}", file=sys.stderr)
                related_queries = {}

            result = {
                "keywords": keywords,
                "timeframe": timeframe,
                "geo": geo,
                "interest_over_time": interest_over_time_df.to_dict() if not interest_over_time_df.empty else {},
                "interest_by_region": interest_by_region_df.to_dict() if not interest_by_region_df.empty else {},
                "related_queries": related_queries,
                "fetched_at": datetime.now().isoformat()
            }

            # Cache the result (errors never reach this point, so only good
            # responses are cached).
            TRENDS_CACHE[cache_key] = result

            # Rate limiting: be polite to the unofficial Trends endpoint.
            time.sleep(1)

            return result

        except Exception as e:
            print(f"Error fetching Google Trends data: {e}", file=sys.stderr)
            return {"error": str(e)}
  • The format_interest_over_time helper that formats the trend data into readable text, calculating statistics (average, peak, low) for each keyword.
    def format_interest_over_time(data: dict) -> str:
        """
        Format interest over time data into readable text.

        Args:
            data: Result dict from get_trends_data; reads 'keywords', 'geo',
                  'timeframe' and 'interest_over_time' (keyword -> {period: value}).

        Returns:
            Per-keyword average/peak/low statistics plus a trend direction,
            or an error / "no data" message.
        """
        if "error" in data:
            return f"❌ Error: {data['error']}"

        if not data.get("interest_over_time"):
            return "No data available for the specified time period and keywords."

        lines = [
            "📊 Google Trends Analysis",
            f"Keywords: {', '.join(data['keywords'])}",
            f"Region: {data['geo'] if data['geo'] else 'Worldwide'}",
            f"Period: {data['timeframe']}",
            "=" * 60,
            ""
        ]

        # Calculate statistics for each keyword
        interest_data = data['interest_over_time']
        if interest_data:
            lines.append("📈 Search Interest Statistics (0-100 scale):")
            lines.append("")

            for keyword in data['keywords']:
                if keyword not in interest_data:
                    continue
                values = [v for v in interest_data[keyword].values() if isinstance(v, (int, float))]
                if not values:
                    continue

                avg = sum(values) / len(values)
                lines.append(f"'{keyword}':")
                lines.append(f"   Average: {avg:.1f}")
                lines.append(f"   Peak: {max(values)}")
                lines.append(f"   Low: {min(values)}")

                # Trend direction: compare the first ~4 samples to the last ~4.
                if len(values) >= 2:
                    recent = values[-4:]
                    older = values[:4]
                    recent_avg = sum(recent) / len(recent)
                    older_avg = sum(older) / len(older)
                    if older_avg == 0:
                        # BUGFIX: the old code divided by older_avg here and
                        # crashed whenever early interest was flat zero.
                        if recent_avg > 0:
                            lines.append("   Trend: 📈 Growing (new)")
                        else:
                            lines.append("   Trend: ➡️  Stable")
                    elif recent_avg > older_avg * 1.1:
                        lines.append(f"   Trend: 📈 Growing ({((recent_avg/older_avg - 1) * 100):.0f}%)")
                    elif recent_avg < older_avg * 0.9:
                        lines.append(f"   Trend: 📉 Declining ({((1 - recent_avg/older_avg) * 100):.0f}%)")
                    else:
                        lines.append("   Trend: ➡️  Stable")

                lines.append("")

        return "\n".join(lines)
  • Helper functions format_regional_interest and format_related_queries that format regional interest data and related queries into readable text output.
    def format_regional_interest(data: dict, top_n: int = 10) -> str:
        """
        Render per-keyword regional interest as a ranked, readable list.

        Args:
            data: Result dict from get_trends_data; reads 'keywords' and
                  'interest_by_region' (keyword -> {region: score}).
            top_n: How many of the highest-scoring regions to show per keyword.

        Returns:
            Readable regional breakdown, or an error / "no data" message.
        """
        if "error" in data:
            return f"❌ Error: {data['error']}"

        region_data = data.get("interest_by_region")
        if not region_data:
            return "No regional data available."

        out = [
            "🌎 Regional Interest",
            "=" * 60,
            ""
        ]

        for kw in data['keywords']:
            if kw not in region_data:
                continue
            out.append(f"Top regions for '{kw}':")

            # Keep numeric, non-zero scores only, highest first, capped at top_n.
            ranked = sorted(
                ((name, score) for name, score in region_data[kw].items()
                 if isinstance(score, (int, float)) and score > 0),
                key=lambda pair: pair[1],
                reverse=True,
            )[:top_n]

            if ranked:
                out.extend(
                    f"   {rank}. {name}: {score}/100"
                    for rank, (name, score) in enumerate(ranked, 1)
                )
            else:
                out.append("   No regional data available")
            out.append("")

        return "\n".join(out)
    
    
    def format_related_queries(data: dict) -> str:
        """
        Render top and rising related queries for each keyword.

        Args:
            data: Result dict from get_trends_data; reads 'keywords' and
                  'related_queries' (keyword -> {'top': DataFrame, 'rising': DataFrame}).

        Returns:
            Readable related-query section, or "" when there is an error or
            no related-query data at all.
        """
        if "error" in data or not data.get("related_queries"):
            return ""

        out = [
            "",
            "🔍 Related & Trending Queries",
            "=" * 60,
            ""
        ]

        related = data['related_queries']
        for kw in data['keywords']:
            if kw not in related:
                continue
            out.append(f"Related to '{kw}':")

            # Up to five most-searched related queries.
            top_df = related[kw].get('top')
            if top_df is not None and not top_df.empty:
                out.append("  Top:")
                for _, row in top_df.head(5).iterrows():
                    out.append(f"    • {row['query']} ({row['value']})")

            # Up to five fastest-growing queries; 'Breakout' means >5000% growth
            # per Google Trends, reported as a string rather than a number.
            rising_df = related[kw].get('rising')
            if rising_df is not None and not rising_df.empty:
                out.append("  Rising:")
                for _, row in rising_df.head(5).iterrows():
                    growth = row['value']
                    suffix = "🔥 Breakout" if growth == 'Breakout' else f"+{growth}%"
                    out.append(f"    • {row['query']} ({suffix})")

            out.append("")

        return "\n".join(out)

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/Talhelf/ph-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server