"""MCP tools for Strava API integration."""
from datetime import date, datetime, timedelta
from collections import defaultdict
from typing import Any, Optional, Callable
from src.client import StravaClient, parse_date
from src.auth import (
    get_authorization_url,
    get_env_credentials,
    save_credentials as save_credentials_to_env,
    complete_auth_with_code,
)
def _get_client_or_error(get_client: Callable[[], Optional[StravaClient]]) -> tuple[Optional[StravaClient], Optional[dict[str, Any]]]:
"""Helper to get client or return error."""
client = get_client()
if client is None:
return None, {"error": "Strava client not initialized. Please authenticate first using save_credentials() and complete_strava_auth()."}
return client, None
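# Every tool registered below returns {"data": ...} on success or
# {"error": "<message>"} on failure, so callers can branch on a single top-level key.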
def register_tools(mcp, get_client: Callable[[], Optional[StravaClient]]) -> None:
"""
Register all MCP tools with the FastMCP instance.
Args:
mcp: FastMCP instance to register tools with
get_client: Function that returns the StravaClient instance or None
"""
# ============================================================================
# Authentication Tools
# ============================================================================
@mcp.tool()
def save_credentials(client_id: str, client_secret: str) -> dict[str, Any]:
"""
Save your Strava API credentials (Client ID and Client Secret).
This is the first step - save these once, then the server will handle everything else.
Args:
client_id: Your Strava API Client ID
client_secret: Your Strava API Client Secret
Returns:
Dictionary confirming credentials were saved
"""
try:
            # Call the aliased src.auth helper; the bare name save_credentials would
            # resolve to this tool function and recurse.
            result = save_credentials_to_env(client_id, client_secret)
return {
"data": {
"status": "success",
"message": "Credentials saved successfully",
"file": result["file"],
"next_step": "Use get_auth_url() to get the authorization URL",
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_auth_url(redirect_uri: str = "http://localhost") -> dict[str, Any]:
"""
Get the Strava OAuth authorization URL using stored credentials.
Visit this URL in your browser to authorize the application.
Args:
redirect_uri: Redirect URI (default: http://localhost)
Returns:
Dictionary containing the authorization URL and instructions
"""
try:
credentials = get_env_credentials()
if not credentials["client_id"]:
return {
"error": "Client ID not found. Please save credentials first using save_credentials."
}
auth_url = get_authorization_url(credentials["client_id"], redirect_uri)
return {
"data": {
"authorization_url": auth_url,
"instructions": [
"1. Visit the authorization URL above in your browser",
"2. Authorize the application when prompted",
"3. You'll be redirected to a URL like: http://localhost/?code=AUTHORIZATION_CODE",
"4. Copy the 'code' parameter value from the URL",
"5. Use complete_strava_auth(code) to finish setup",
],
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def complete_strava_auth(auth_code: str) -> dict[str, Any]:
"""
Complete Strava authentication using stored credentials and authorization code.
This automatically exchanges the code for tokens and saves everything.
Args:
auth_code: The authorization code from the redirect URL
Returns:
Dictionary containing setup status and token information
"""
try:
result = complete_auth_with_code(auth_code)
return {
"data": {
"status": "success",
"message": "Strava authentication completed successfully!",
"refresh_token_preview": result["refresh_token_preview"],
"expires_at": result["expires_at"],
"env_file": result["env_file"],
"next_steps": [
"Restart the MCP server to load the new credentials",
"Use check_auth_status() to verify the setup",
"Start using Strava API tools",
],
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_strava_auth_token(
client_id: str, client_secret: str, auth_code: str
) -> dict[str, Any]:
"""
        Complete Strava authentication and get a refresh token in one step.
        This tool handles the full authentication flow: it saves your credentials and exchanges the authorization code for tokens.
Args:
client_id: Your Strava API Client ID
client_secret: Your Strava API Client Secret
auth_code: The authorization code from the OAuth redirect URL
Returns:
Dictionary containing authentication status and refresh token information
"""
try:
# Step 1: Save credentials
            save_credentials_to_env(client_id, client_secret)
# Step 2: Complete authentication
auth_result = complete_auth_with_code(auth_code)
return {
"data": {
"status": "success",
"message": "Strava authentication completed successfully!",
"refresh_token_preview": auth_result["refresh_token_preview"],
"expires_at": auth_result["expires_at"],
"env_file": auth_result["env_file"],
"credentials_saved": True,
"next_steps": [
"Restart the MCP server to load the new credentials",
"Start using Strava API tools",
],
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def check_auth_status() -> dict[str, Any]:
"""
Check the current authentication status.
Shows whether credentials are configured and tokens are available.
Returns:
Dictionary containing authentication status
"""
try:
credentials = get_env_credentials()
return {
"data": {
"is_configured": credentials["is_configured"],
"has_client_id": bool(credentials["client_id"]),
"has_client_secret": bool(credentials["client_secret"]),
"has_refresh_token": credentials["has_refresh_token"],
"message": (
"Authentication is fully configured and ready to use!"
if credentials["is_configured"]
else (
"Credentials saved. Use get_auth_url() to start authentication."
if credentials["client_id"] and credentials["client_secret"]
else "No credentials found. Use save_credentials() to start setup."
)
),
}
}
except Exception as e:
return {"error": str(e)}
# ============================================================================
# Basic Activity Queries
# ============================================================================
@mcp.tool()
def get_activities(limit: int = 10) -> dict[str, Any]:
"""Get the authenticated athlete's recent activities."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
activities = client.get_activities(limit=limit)
return {"data": activities}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_activities_by_range(start_date: str, end_date: str, limit: int = 30) -> dict[str, Any]:
"""Get activities within a specific date range."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
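            # Strava's before/after filters take Unix epoch seconds; spanning from the
            # very start of start_date to the very end of end_date covers both days fully.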
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=limit, before=before, after=after)
return {"data": activities}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_activity_by_id(activity_id: int) -> dict[str, Any]:
"""Get detailed information about a specific activity."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
activity = client.get_activity(activity_id)
return {"data": activity}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_recent(days: int = 7, limit: int = 10) -> dict[str, Any]:
"""Get activities from the past X days."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
now = datetime.now()
days_ago = now - timedelta(days=days)
after = int(days_ago.timestamp())
activities = client.get_activities(limit=limit, after=after)
return {"data": activities}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def search(query: str, limit: int = 10) -> dict[str, Any]:
"""Search for activities by name (case-insensitive substring match)."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
activities = client.get_activities(limit=limit)
filtered = [a for a in activities if query.lower() in a.get("name", "").lower()]
return {"data": filtered}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def fetch(id: str) -> dict[str, Any]:
"""Fetch a single activity by its ID."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
activity = client.get_activity(int(id))
return {"data": activity}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_runs_by_range(start_date: str, end_date: str, limit: int = 200) -> dict[str, Any]:
"""Get running activities within a specific date range (optimized for marathon training)."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=limit, before=before, after=after, sport_type="Run")
return {"data": activities}
except Exception as e:
return {"error": str(e)}
# ============================================================================
# Detailed Activity Analysis
# ============================================================================
@mcp.tool()
def get_activity_streams(
activity_id: int,
keys: str = "time,distance,latlng,altitude,velocity_smooth,heartrate,cadence,watts,temp,moving,grade_smooth",
) -> dict[str, Any]:
"""Get detailed time-series data (streams) for a specific activity."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
keys_list = [k.strip() for k in keys.split(",")]
streams = client.get_activity_streams(activity_id, keys_list)
return {"data": streams}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_activity_laps(activity_id: int) -> dict[str, Any]:
"""Get laps for a specific activity."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
laps = client.get_activity_laps(activity_id)
return {"data": laps}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_activity_zones(activity_id: int) -> dict[str, Any]:
"""Get heart rate and power zone data for a specific activity."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
zones = client.get_activity_zones(activity_id)
return {"data": zones}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def get_athlete_stats(athlete_id: Optional[int] = None) -> dict[str, Any]:
"""Get statistics for an athlete."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
stats = client.get_athlete_stats(athlete_id)
return {"data": stats}
except Exception as e:
return {"error": str(e)}
# ============================================================================
# Training Analysis Tools (Marathon Focus)
# ============================================================================
@mcp.tool()
def analyze_weekly(start_date: str, end_date: Optional[str] = None) -> dict[str, Any]:
"""Analyze weekly training metrics for marathon preparation."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date) if end_date else start + timedelta(days=7)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
total_distance = sum(a.get("distance_metres", 0) for a in activities) / 1000
total_time = sum(a.get("moving_time_seconds", 0) for a in activities)
total_elevation = sum(a.get("total_elevation_gain_metres", 0) for a in activities)
total_calories = sum(a.get("calories", 0) for a in activities)
avg_pace_seconds_per_km = total_time / total_distance if total_distance > 0 else 0
avg_pace_min_per_km = avg_pace_seconds_per_km / 60
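            # total_time is in seconds and total_distance in km, so the ratio is seconds
            # per km; e.g. 15000 s of moving time over 50 km -> 300 s/km -> 5.0 min/km.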
long_runs = [a for a in activities if a.get("distance_metres", 0) >= 20000]
workouts = [a for a in activities if a.get("workout_type") == 3]
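            # workout_type uses Strava's run codes as assumed throughout this module:
            # 1 = race, 3 = workout (structured session).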
return {
"data": {
"period": {"start": start_date, "end": end.isoformat()},
"total_runs": len(activities),
"total_distance_km": round(total_distance, 2),
"total_time_hours": round(total_time / 3600, 2),
"total_elevation_gain_metres": round(total_elevation, 0),
"total_calories": total_calories,
"average_pace_min_per_km": round(avg_pace_min_per_km, 2),
"long_runs_count": len(long_runs),
"long_runs_distance_km": round(sum(a.get("distance_metres", 0) for a in long_runs) / 1000, 2),
"workouts_count": len(workouts),
"activities": activities,
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def analyze_monthly(year: int, month: int) -> dict[str, Any]:
"""Analyze monthly training metrics for marathon preparation."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = date(year, month, 1)
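            # Last day of the month: the first day of the following month minus one day.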
            if month == 12:
                end = date(year + 1, 1, 1) - timedelta(days=1)
            else:
                end = date(year, month + 1, 1) - timedelta(days=1)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
total_distance = sum(a.get("distance_metres", 0) for a in activities) / 1000
total_time = sum(a.get("moving_time_seconds", 0) for a in activities)
total_elevation = sum(a.get("total_elevation_gain_metres", 0) for a in activities)
total_calories = sum(a.get("calories", 0) for a in activities)
avg_pace_seconds_per_km = total_time / total_distance if total_distance > 0 else 0
avg_pace_min_per_km = avg_pace_seconds_per_km / 60
weekly_stats = defaultdict(lambda: {"distance_km": 0, "runs": 0, "time_hours": 0})
for activity in activities:
start_date_str = activity.get("start_date", "")
if start_date_str:
try:
activity_date = datetime.fromisoformat(start_date_str.replace("Z", "+00:00")).date()
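                        # date.weekday() is 0 for Monday, so week_start is the Monday of
                        # the activity's week and serves as the key for the weekly rollup.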
week_start = activity_date - timedelta(days=activity_date.weekday())
week_key = week_start.isoformat()
weekly_stats[week_key]["distance_km"] += activity.get("distance_metres", 0) / 1000
weekly_stats[week_key]["runs"] += 1
weekly_stats[week_key]["time_hours"] += activity.get("moving_time_seconds", 0) / 3600
except Exception:
pass
long_runs = [a for a in activities if a.get("distance_metres", 0) >= 20000]
workouts = [a for a in activities if a.get("workout_type") == 3]
races = [a for a in activities if a.get("workout_type") == 1]
return {
"data": {
"period": {"year": year, "month": month, "start": start.isoformat(), "end": end.isoformat()},
"total_runs": len(activities),
"total_distance_km": round(total_distance, 2),
"total_time_hours": round(total_time / 3600, 2),
"total_elevation_gain_metres": round(total_elevation, 0),
"total_calories": total_calories,
"average_pace_min_per_km": round(avg_pace_min_per_km, 2),
"average_distance_per_run_km": round(total_distance / len(activities) if activities else 0, 2),
"long_runs_count": len(long_runs),
"long_runs_distance_km": round(sum(a.get("distance_metres", 0) for a in long_runs) / 1000, 2),
"workouts_count": len(workouts),
"races_count": len(races),
"weekly_breakdown": {
week: {
"distance_km": round(stats["distance_km"], 2),
"runs": stats["runs"],
"time_hours": round(stats["time_hours"], 2),
}
for week, stats in sorted(weekly_stats.items())
},
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def find_long_runs(start_date: str, end_date: str, min_distance_km: float = 20.0) -> dict[str, Any]:
"""Find long runs within a date range."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
min_distance_metres = min_distance_km * 1000
long_runs = [a for a in activities if a.get("distance_metres", 0) >= min_distance_metres]
long_runs.sort(key=lambda x: x.get("distance_metres", 0), reverse=True)
return {
"data": {
"count": len(long_runs),
"min_distance_km": min_distance_km,
"long_runs": long_runs,
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def analyze_pace_trends(start_date: str, end_date: str) -> dict[str, Any]:
"""Analyze pace trends over time for training progression analysis."""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
pace_data = []
for activity in activities:
distance_km = activity.get("distance_metres", 0) / 1000
time_seconds = activity.get("moving_time_seconds", 0)
if distance_km > 0 and time_seconds > 0:
pace_min_per_km = (time_seconds / 60) / distance_km
pace_data.append({
"date": activity.get("start_date", ""),
"distance_km": round(distance_km, 2),
"pace_min_per_km": round(pace_min_per_km, 2),
"activity_id": activity.get("id"),
"name": activity.get("name", ""),
})
pace_data.sort(key=lambda x: x.get("date", ""))
if pace_data:
paces = [p["pace_min_per_km"] for p in pace_data]
avg_pace = sum(paces) / len(paces)
min_pace = min(paces)
max_pace = max(paces)
else:
avg_pace = min_pace = max_pace = 0
return {
"data": {
"period": {"start": start_date, "end": end_date},
"total_runs": len(pace_data),
"average_pace_min_per_km": round(avg_pace, 2),
"fastest_pace_min_per_km": round(min_pace, 2) if pace_data else None,
"slowest_pace_min_per_km": round(max_pace, 2) if pace_data else None,
"pace_data": pace_data,
}
}
except Exception as e:
return {"error": str(e)}
# ============================================================================
# Heart Rate Zone Analysis & Injury Prevention Tools
# ============================================================================
@mcp.tool()
def analyze_hr_zones(start_date: str, end_date: str) -> dict[str, Any]:
"""
Analyze heart rate zone distribution across multiple runs to ensure proper training balance.
Helps identify if you're training too hard (too much time in high zones) or not hard enough.
Args:
start_date: Start date in ISO format (YYYY-MM-DD)
end_date: End date in ISO format (YYYY-MM-DD)
Returns:
Dictionary containing HR zone distribution analysis
"""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
total_time_zones = defaultdict(float)
activities_with_zones = []
total_time_seconds = 0
for activity in activities:
activity_id = activity.get("id")
if not activity_id:
continue
try:
zones_data = client.get_activity_zones(activity_id)
if zones_data and "heart_rate" in zones_data:
hr_zones = zones_data["heart_rate"]
if hr_zones and "distribution_buckets" in hr_zones:
buckets = hr_zones["distribution_buckets"]
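                            # Each distribution bucket reports seconds spent between its min/max
                            # HR bounds; buckets are assumed to map in order to Zones 1-5.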
activity_time = activity.get("moving_time_seconds", 0)
zone_times = {}
for i, bucket in enumerate(buckets, start=1):
if bucket and "time" in bucket:
zone_time = bucket["time"]
total_time_zones[f"Zone {i}"] += zone_time
zone_times[f"zone_{i}_seconds"] = zone_time
activities_with_zones.append({
"activity_id": activity_id,
"name": activity.get("name", ""),
"date": activity.get("start_date", ""),
"distance_km": round(activity.get("distance_metres", 0) / 1000, 2),
"zone_times": zone_times,
"total_time_seconds": activity_time,
})
total_time_seconds += activity_time
except Exception:
# Skip activities without HR zone data
continue
# Calculate percentages
zone_percentages = {}
if total_time_seconds > 0:
for zone, time_seconds in total_time_zones.items():
zone_percentages[zone] = round((time_seconds / total_time_seconds) * 100, 1)
# Calculate easy vs hard ratio (Zone 1-2 vs Zone 4-5)
easy_time = total_time_zones.get("Zone 1", 0) + total_time_zones.get("Zone 2", 0)
hard_time = total_time_zones.get("Zone 4", 0) + total_time_zones.get("Zone 5", 0)
easy_hard_ratio = round(easy_time / hard_time, 2) if hard_time > 0 else None
# Training balance assessment
zone1_2_percent = zone_percentages.get("Zone 1", 0) + zone_percentages.get("Zone 2", 0)
zone4_5_percent = zone_percentages.get("Zone 4", 0) + zone_percentages.get("Zone 5", 0)
balance_assessment = "Good"
if zone4_5_percent > 30:
balance_assessment = "Too much high-intensity training - increase easy runs"
elif zone1_2_percent < 60:
balance_assessment = "Not enough easy training - aim for 80% easy, 20% hard"
elif zone1_2_percent >= 70 and zone4_5_percent >= 15:
balance_assessment = "Excellent - following 80/20 principle"
return {
"data": {
"period": {"start": start_date, "end": end_date},
"activities_analyzed": len(activities_with_zones),
"total_time_hours": round(total_time_seconds / 3600, 2),
"zone_distribution_seconds": {k: round(v, 0) for k, v in total_time_zones.items()},
"zone_distribution_percent": zone_percentages,
"easy_hard_ratio": easy_hard_ratio,
"easy_zones_percent": round(zone1_2_percent, 1),
"hard_zones_percent": round(zone4_5_percent, 1),
"balance_assessment": balance_assessment,
"recommendation": (
"Aim for 80% of training in Zone 1-2 (easy) and 20% in Zone 4-5 (hard) "
"to optimize performance and reduce injury risk."
),
"activities": activities_with_zones,
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def analyze_training_load(start_date: str, end_date: str) -> dict[str, Any]:
"""
Analyze training load and identify injury risk indicators.
Detects sudden volume increases, consecutive hard days, and lack of recovery.
Args:
start_date: Start date in ISO format (YYYY-MM-DD)
end_date: End date in ISO format (YYYY-MM-DD)
Returns:
Dictionary containing training load analysis and injury risk indicators
"""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
# Group by week
weekly_load = defaultdict(lambda: {
"distance_km": 0,
"time_hours": 0,
"runs": 0,
"hard_runs": 0,
"elevation_metres": 0,
})
daily_load = defaultdict(lambda: {
"distance_km": 0,
"time_hours": 0,
"runs": 0,
"is_hard": False,
})
for activity in activities:
start_date_str = activity.get("start_date", "")
if not start_date_str:
continue
try:
activity_date = datetime.fromisoformat(start_date_str.replace("Z", "+00:00")).date()
week_start = activity_date - timedelta(days=activity_date.weekday())
week_key = week_start.isoformat()
distance_km = activity.get("distance_metres", 0) / 1000
time_hours = activity.get("moving_time_seconds", 0) / 3600
is_hard = activity.get("workout_type") in [1, 3] # Race or workout
weekly_load[week_key]["distance_km"] += distance_km
weekly_load[week_key]["time_hours"] += time_hours
weekly_load[week_key]["runs"] += 1
weekly_load[week_key]["elevation_metres"] += activity.get("total_elevation_gain_metres", 0)
if is_hard:
weekly_load[week_key]["hard_runs"] += 1
daily_load[activity_date.isoformat()]["distance_km"] += distance_km
daily_load[activity_date.isoformat()]["time_hours"] += time_hours
daily_load[activity_date.isoformat()]["runs"] += 1
if is_hard:
daily_load[activity_date.isoformat()]["is_hard"] = True
except Exception:
continue
# Calculate weekly progression
weekly_data = []
for week_key in sorted(weekly_load.keys()):
week_data = weekly_load[week_key]
weekly_data.append({
"week_start": week_key,
"distance_km": round(week_data["distance_km"], 2),
"time_hours": round(week_data["time_hours"], 2),
"runs": week_data["runs"],
"hard_runs": week_data["hard_runs"],
"elevation_metres": round(week_data["elevation_metres"], 0),
})
# Identify injury risk indicators
risk_indicators = []
# Check for volume spikes (>10% increase)
for i in range(1, len(weekly_data)):
prev_distance = weekly_data[i-1]["distance_km"]
curr_distance = weekly_data[i]["distance_km"]
if prev_distance > 0:
increase_percent = ((curr_distance - prev_distance) / prev_distance) * 100
if increase_percent > 10:
risk_indicators.append({
"type": "volume_spike",
"severity": "high" if increase_percent > 20 else "medium",
"message": f"Week {weekly_data[i]['week_start']}: {increase_percent:.1f}% volume increase "
f"({prev_distance:.1f}km → {curr_distance:.1f}km). "
f"Recommend gradual increases of <10% per week.",
"week": weekly_data[i]["week_start"],
})
# Check for consecutive hard days
sorted_days = sorted(daily_load.items())
consecutive_hard = 0
max_consecutive_hard = 0
for day_key, day_data in sorted_days:
if day_data["is_hard"]:
consecutive_hard += 1
max_consecutive_hard = max(max_consecutive_hard, consecutive_hard)
else:
consecutive_hard = 0
if max_consecutive_hard >= 2:
risk_indicators.append({
"type": "consecutive_hard_days",
"severity": "medium" if max_consecutive_hard == 2 else "high",
"message": f"Found {max_consecutive_hard} consecutive hard training days. "
f"Ensure adequate recovery between hard sessions.",
})
# Check for rest days
            all_days = set(daily_load.keys())
# Calculate days in range
current_date = start
total_days = 0
rest_days = 0
while current_date <= end:
total_days += 1
if current_date.isoformat() not in all_days:
rest_days += 1
current_date += timedelta(days=1)
rest_day_percent = (rest_days / total_days * 100) if total_days > 0 else 0
if rest_day_percent < 20:
risk_indicators.append({
"type": "insufficient_rest",
"severity": "medium",
"message": f"Only {rest_day_percent:.1f}% rest days ({rest_days}/{total_days} days). "
f"Aim for at least 20-30% rest days for optimal recovery.",
})
# Calculate average weekly load
avg_weekly_distance = sum(w["distance_km"] for w in weekly_data) / len(weekly_data) if weekly_data else 0
avg_weekly_time = sum(w["time_hours"] for w in weekly_data) / len(weekly_data) if weekly_data else 0
return {
"data": {
"period": {"start": start_date, "end": end_date},
"weeks_analyzed": len(weekly_data),
"average_weekly_distance_km": round(avg_weekly_distance, 2),
"average_weekly_time_hours": round(avg_weekly_time, 2),
"rest_days": rest_days,
"rest_day_percent": round(rest_day_percent, 1),
"max_consecutive_hard_days": max_consecutive_hard,
"weekly_breakdown": weekly_data,
"risk_indicators": risk_indicators,
"risk_level": (
"high" if any(r["severity"] == "high" for r in risk_indicators)
else "medium" if risk_indicators
else "low"
),
"recommendations": [
"Keep weekly volume increases under 10%",
"Allow at least one rest day between hard sessions",
"Aim for 20-30% rest days",
"Follow 80/20 easy/hard training ratio",
],
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def analyze_hr_trends(start_date: str, end_date: str, reference_pace_min_per_km: Optional[float] = None) -> dict[str, Any]:
"""
Analyze heart rate trends over time to track fitness improvements.
Lower HR at the same pace indicates improved fitness.
Args:
start_date: Start date in ISO format (YYYY-MM-DD)
end_date: End date in ISO format (YYYY-MM-DD)
reference_pace_min_per_km: Optional reference pace to compare HR at (e.g., 5.0 for 5:00/km)
Returns:
Dictionary containing heart rate trend analysis
"""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
hr_trend_data = []
for activity in activities:
activity_id = activity.get("id")
if not activity_id:
continue
distance_km = activity.get("distance_metres", 0) / 1000
time_seconds = activity.get("moving_time_seconds", 0)
avg_hr = activity.get("average_heartrate_bpm")
pace_min_per_km = (time_seconds / 60) / distance_km if distance_km > 0 else None
if not avg_hr or not pace_min_per_km or distance_km < 3: # Skip very short runs
continue
# If reference pace specified, only include runs close to that pace
if reference_pace_min_per_km:
pace_diff = abs(pace_min_per_km - reference_pace_min_per_km)
if pace_diff > 0.5: # More than 30 seconds/km difference
continue
hr_trend_data.append({
"date": activity.get("start_date", ""),
"activity_id": activity_id,
"name": activity.get("name", ""),
"distance_km": round(distance_km, 2),
"pace_min_per_km": round(pace_min_per_km, 2),
"average_hr_bpm": avg_hr,
"max_hr_bpm": activity.get("max_heartrate_bpm"),
})
hr_trend_data.sort(key=lambda x: x.get("date", ""))
if not hr_trend_data:
return {
"data": {
"message": "No activities with heart rate data found in the specified period",
"period": {"start": start_date, "end": end_date},
}
}
# Calculate trends
if len(hr_trend_data) >= 2:
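                # Split the date-sorted runs into first and second halves: a lower average
                # HR at a similar average pace in the later half suggests improved fitness.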
first_half = hr_trend_data[:len(hr_trend_data)//2]
second_half = hr_trend_data[len(hr_trend_data)//2:]
avg_hr_first = sum(d["average_hr_bpm"] for d in first_half) / len(first_half)
avg_hr_second = sum(d["average_hr_bpm"] for d in second_half) / len(second_half)
hr_change = avg_hr_second - avg_hr_first
avg_pace_first = sum(d["pace_min_per_km"] for d in first_half) / len(first_half)
avg_pace_second = sum(d["pace_min_per_km"] for d in second_half) / len(second_half)
pace_change = avg_pace_second - avg_pace_first
# Fitness assessment
if hr_change < -3 and pace_change <= 0.1:
fitness_trend = "Improving - Lower HR at similar pace"
elif hr_change > 3:
fitness_trend = "Declining - Higher HR at similar pace (may indicate fatigue/overtraining)"
else:
fitness_trend = "Stable"
else:
avg_hr_first = avg_hr_second = hr_change = None
avg_pace_first = avg_pace_second = pace_change = None
fitness_trend = "Insufficient data for trend analysis"
all_hrs = [d["average_hr_bpm"] for d in hr_trend_data]
avg_hr_overall = sum(all_hrs) / len(all_hrs) if all_hrs else None
return {
"data": {
"period": {"start": start_date, "end": end_date},
"activities_analyzed": len(hr_trend_data),
"reference_pace_min_per_km": reference_pace_min_per_km,
"average_hr_bpm": round(avg_hr_overall, 1) if avg_hr_overall else None,
"first_half_avg_hr_bpm": round(avg_hr_first, 1) if avg_hr_first else None,
"second_half_avg_hr_bpm": round(avg_hr_second, 1) if avg_hr_second else None,
"hr_change_bpm": round(hr_change, 1) if hr_change is not None else None,
"first_half_avg_pace": round(avg_pace_first, 2) if avg_pace_first else None,
"second_half_avg_pace": round(avg_pace_second, 2) if avg_pace_second else None,
"pace_change_min_per_km": round(pace_change, 2) if pace_change is not None else None,
"fitness_trend": fitness_trend,
"hr_trend_data": hr_trend_data,
}
}
except Exception as e:
return {"error": str(e)}
@mcp.tool()
def analyze_recovery(start_date: str, end_date: str) -> dict[str, Any]:
"""
Analyze recovery patterns including rest days, easy run frequency, and training stress distribution.
Args:
start_date: Start date in ISO format (YYYY-MM-DD)
end_date: End date in ISO format (YYYY-MM-DD)
Returns:
Dictionary containing recovery analysis
"""
client, error = _get_client_or_error(get_client)
if error:
return error
try:
start = parse_date(start_date)
end = parse_date(end_date)
after = int(datetime.combine(start, datetime.min.time()).timestamp())
before = int(datetime.combine(end, datetime.max.time()).timestamp())
activities = client.get_activities(limit=200, before=before, after=after, sport_type="Run")
# Categorize runs
easy_runs = []
hard_runs = []
long_runs = []
rest_days = set()
# Track daily activity
daily_activities = defaultdict(list)
for activity in activities:
start_date_str = activity.get("start_date", "")
if not start_date_str:
continue
try:
activity_date = datetime.fromisoformat(start_date_str.replace("Z", "+00:00")).date()
daily_activities[activity_date.isoformat()].append(activity)
distance_km = activity.get("distance_metres", 0) / 1000
workout_type = activity.get("workout_type")
avg_hr = activity.get("average_heartrate_bpm")
max_hr = activity.get("max_heartrate_bpm")
# Categorize
is_hard = workout_type in [1, 3] # Race or workout
is_long = distance_km >= 20
if is_hard:
hard_runs.append({
"date": activity_date.isoformat(),
"activity_id": activity.get("id"),
"name": activity.get("name", ""),
"distance_km": round(distance_km, 2),
"type": "workout" if workout_type == 3 else "race",
})
elif is_long:
long_runs.append({
"date": activity_date.isoformat(),
"activity_id": activity.get("id"),
"name": activity.get("name", ""),
"distance_km": round(distance_km, 2),
})
else:
easy_runs.append({
"date": activity_date.isoformat(),
"activity_id": activity.get("id"),
"name": activity.get("name", ""),
"distance_km": round(distance_km, 2),
"avg_hr_bpm": avg_hr,
})
except Exception:
continue
# Calculate rest days
current_date = start
total_days = 0
while current_date <= end:
total_days += 1
if current_date.isoformat() not in daily_activities:
rest_days.add(current_date.isoformat())
current_date += timedelta(days=1)
# Calculate recovery metrics
rest_day_count = len(rest_days)
rest_day_percent = (rest_day_count / total_days * 100) if total_days > 0 else 0
# Easy vs hard ratio
total_runs = len(easy_runs) + len(hard_runs) + len(long_runs)
easy_hard_ratio = round(len(easy_runs) / len(hard_runs), 2) if hard_runs else None
# Check for adequate recovery between hard sessions
recovery_issues = []
hard_dates = sorted([r["date"] for r in hard_runs])
for i in range(1, len(hard_dates)):
prev_date = datetime.fromisoformat(hard_dates[i-1]).date()
curr_date = datetime.fromisoformat(hard_dates[i]).date()
days_between = (curr_date - prev_date).days
if days_between < 2:
recovery_issues.append({
"date": hard_dates[i],
"days_since_last_hard": days_between,
"message": f"Only {days_between} day(s) between hard sessions - insufficient recovery",
})
# Recovery assessment
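            # Score starts at 100 and loses 20 points for <20% rest days, another 20 for
            # an easy:hard ratio below 3, and 10 per insufficient-recovery gap found above.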
recovery_score = 100
if rest_day_percent < 20:
recovery_score -= 20
if easy_hard_ratio and easy_hard_ratio < 3:
recovery_score -= 20
if recovery_issues:
recovery_score -= len(recovery_issues) * 10
recovery_assessment = (
"Excellent" if recovery_score >= 80
else "Good" if recovery_score >= 60
else "Needs Improvement" if recovery_score >= 40
else "Poor"
)
return {
"data": {
"period": {"start": start_date, "end": end_date},
"total_days": total_days,
"rest_days": rest_day_count,
"rest_day_percent": round(rest_day_percent, 1),
"total_runs": total_runs,
"easy_runs": len(easy_runs),
"hard_runs": len(hard_runs),
"long_runs": len(long_runs),
"easy_hard_ratio": easy_hard_ratio,
"recovery_score": recovery_score,
"recovery_assessment": recovery_assessment,
"recovery_issues": recovery_issues,
"recommendations": [
f"Aim for {round(total_days * 0.25)} rest days ({25}% of training period)",
"Maintain 3:1 or better easy:hard run ratio",
"Allow at least 2 days between hard sessions",
"Include 1-2 rest days per week",
],
"rest_days_list": sorted(list(rest_days)),
"easy_runs": easy_runs[:10], # Limit to first 10 for brevity
"hard_runs": hard_runs,
"long_runs": long_runs,
}
}
except Exception as e:
return {"error": str(e)}