Skip to main content
Glama
r-huijts

FirstCycling MCP Server

by r-huijts

get_rider_best_results

Retrieve a cyclist's top career results, including race positions, stage wins, and classifications. Input a rider ID to get formatted highlights sorted by race importance, with details like category, country, and date.

Instructions

Retrieve the best career results of a professional cyclist, including their top finishes in various races. This tool provides a comprehensive overview of a rider's most significant achievements throughout their career, including their highest positions in major races, stage wins, and overall classifications. Results are sorted by importance and include detailed information about each race.

Note: If you don't know the rider's ID, use the search_rider tool first to find it by name.

Example usage:
- Get top 10 best results for Tadej Pogačar (ID: 16973)
- Get top 5 best results for Jonas Vingegaard (ID: 16974)

Returns a formatted string with:
- Rider's name and career highlights
- Top results sorted by importance
- Race details including category and country
- Date and position for each result

Input Schema

Table / JSON Schema

| Name     | Required | Description | Default |
|----------|----------|-------------|---------|
| limit    | No       |             |         |
| rider_id | Yes      |             |         |

Implementation Reference

  • The main handler function named exactly 'get_rider_best_results' that takes rider_id, optionally debug, creates Rider object, fetches best_results, and displays top results.
    def _debug_best_results_page(rider_id):
        """Fetch the rider's best-results page directly and print parsing diagnostics.

        Side-effect-only helper for get_rider_best_results(debug=True): prints the
        URL checked, how many 'tablesorter' tables were found, a preview of the
        first table's rows, and the result of parsing it with pandas.
        """
        url = f"https://firstcycling.com/rider.php?r={rider_id}&high=1"
        print(f"Checking URL: {url}")
        # Timeout so a dead/slow host cannot hang the debug run indefinitely.
        response = requests.get(url, timeout=30)
        soup = BeautifulSoup(response.text, 'html.parser')

        tables = soup.find_all('table', {'class': 'tablesorter'})
        print(f"Found {len(tables)} tables with class 'tablesorter'")
        if not tables:
            return

        first_table = tables[0]
        rows = first_table.find_all('tr')
        print(f"First table has {len(rows)} rows")
        if len(rows) <= 1:  # header row only (or nothing)
            print("Table appears to be empty or has only a header row")
            return

        try:
            print("Table content preview:")
            for i, row in enumerate(rows[:3]):  # show first 3 rows
                print(f"Row {i}: {row.get_text().strip()}")

            # Try direct pandas parsing of the raw table HTML.
            df = pd.read_html(io.StringIO(str(first_table)), decimal=',')[0]
            print(f"Successfully parsed table with shape: {df.shape}")
            print("Column names:", df.columns.tolist())
            print("First few rows:")
            print(df.head(3))
        except Exception as e:
            print(f"Error parsing table directly: {str(e)}")


    def get_rider_best_results(rider_id, debug=False, limit=10):
        """Get a rider's best results from FirstCycling and print them.

        Parameters
        ----------
        rider_id : int or str
            FirstCycling rider ID (use the search_rider tool to find it by name).
        debug : bool, optional
            When True, fetch the results page directly and print diagnostics
            about the HTML table before the normal parsing path runs.
        limit : int, optional
            Maximum number of results to display (default 10). This matches the
            tool's documented ``limit`` input, which the previous version
            hardcoded to 10.
        """
        # Create rider instance
        rider = Rider(rider_id)

        # Get basic rider info
        print(f"Rider ID: {rider.ID}")

        if debug:
            _debug_best_results_page(rider_id)

        # Get best results
        best_results = rider.best_results()

        # Display information about best results
        if hasattr(best_results, 'results_df') and not best_results.results_df.empty:
            results_df = best_results.results_df
            print(f"\nFound {len(results_df)} best results:")

            # Display up to `limit` best results (or all if fewer are available).
            print(f"\nTop {limit} best results:")
            shown = min(limit, len(results_df))
            for i, (_, row) in enumerate(results_df.head(shown).iterrows(), 1):
                race = row.get('Race', 'Unknown Race')
                pos = row.get('Pos', 'N/A')
                editions = row.get('Editions', '')
                cat = row.get('CAT', '')
                country = row.get('Race_Country', '')

                result_line = f"{i}. {pos}. {race}"
                if cat:
                    result_line += f" ({cat})"
                if editions:
                    result_line += f" - {editions}"
                if country:
                    result_line += f" - {country}"

                print(result_line)
        else:
            print("No best results found for this rider.")
  • Helper method on Rider class that fetches the best results endpoint data using RiderBestResults parser.
    def best_results(self):
        """Fetch this rider's best career results.

        Queries the best-results endpoint (``high=1``) via the generic
        endpoint dispatcher.

        Returns
        -------
        RiderBestResults
            Parsed best-results data for the rider.
        """
        endpoint_data = self._get_endpoint(endpoint=RiderBestResults, high=1)
        return endpoint_data
  • Helper class RiderBestResults that parses the HTML soup to extract and structure the best results table into a pandas DataFrame.
    class RiderBestResults(RiderEndpoint):
        """
        Rider's best results. Extends RiderEndpoint.

        Attributes
        ----------
        results_df : pd.DataFrame
            Table of rider's best results. Always set after parsing; empty
            when the page has no results table or the table holds "No data".
        """

        def _parse_soup(self):
            """Run the base page parsing, then extract the best-results table."""
            super()._parse_soup()
            self._get_best_results()

        def _get_best_results(self):
            """Populate ``self.results_df`` from the page's results table.

            Parses the 'tablesorter' table manually (its structure differs from
            the victories table); on failure, falls back to ``pd.read_html``.
            ``results_df`` is always assigned, possibly as an empty DataFrame.
            """
            # Hoisted out of the per-cell loop, where the previous version
            # re-imported it for every race link.
            import re

            # Find table with best results (note different class than victories table)
            table = self.soup.find('table', {'class': "tablesorter"})
            if table is None:
                # No table found on the page at all.
                self.results_df = pd.DataFrame()
                return

            # Table exists but carries a "No data" placeholder instead of rows.
            if "No data" in table.get_text().strip():
                self.results_df = pd.DataFrame()
                return

            try:
                # Parse manually: map each cell to its <thead> header.
                headers = [th.text.strip() for th in table.find('thead').find_all('th')]
                race_id_pattern = re.compile(r'r=(\d+)')

                rows_data = []
                tbody = table.find('tbody') or table
                for tr in tbody.find_all('tr'):
                    cells = tr.find_all('td')
                    # Skip empty rows
                    if not cells:
                        continue

                    row_data = {}
                    for i, cell in enumerate(cells):
                        if i >= len(headers):
                            # More cells than headers: ignore the extras,
                            # matching the original header-bounded mapping.
                            continue
                        header = headers[i]
                        row_data[header] = cell.text.strip()

                        # Extract race ID from the race link if available.
                        link = cell.find('a')
                        if header == 'Race' and link:
                            race_id_match = race_id_pattern.search(link.get('href', ''))
                            if race_id_match:
                                row_data['Race_ID'] = race_id_match.group(1)

                        # Extract country code from a flag image if available.
                        img = cell.find('img')
                        if img:
                            country_code = img_to_country_code(img)
                            if country_code:
                                row_data['Race_Country'] = country_code

                    rows_data.append(row_data)

                # Create DataFrame from the collected data
                self.results_df = pd.DataFrame(rows_data)

                # Normalize: an empty parse result becomes a plain empty DataFrame.
                if self.results_df.empty:
                    self.results_df = pd.DataFrame()

            except Exception as e:
                # Manual parsing failed; report and fall back to pandas' own
                # HTML table parser on the raw table markup.
                print(f"Warning: Error parsing best results table: {str(e)}")
                try:
                    self.results_df = pd.read_html(io.StringIO(str(table)), decimal=',')[0]

                    # A single "No data" row also counts as no results.
                    if self.results_df.empty or (self.results_df.shape[0] == 1 and
                            any("No data" in str(cell) for cell in self.results_df.iloc[0])):
                        self.results_df = pd.DataFrame()
                        return

                except Exception as fallback_err:  # distinct name: don't shadow `e`
                    # If all else fails, just return an empty DataFrame.
                    print(f"Warning: Error creating DataFrame from table HTML: {str(fallback_err)}")
                    self.results_df = pd.DataFrame()

Latest Blog Posts

MCP directory API

We provide all the information about MCP servers via our MCP API.

curl -X GET 'https://glama.ai/api/mcp/v1/servers/r-huijts/firstcycling-mcp'

If you have feedback or need assistance with the MCP directory API, please join our Discord server.