ReadFiles

Extract and analyze full file content or specific line ranges from absolute paths. Enables precise file reading for tasks that require an understanding of line numbers.

Instructions

  • Read full file content of one or more files.

  • Provide absolute paths only (~ allowed)

  • Only if the task requires an understanding of line numbers:

    • You may extract a range of lines, e.g., /path/to/file:1-10 for lines 1-10. You can drop the start or end, as in /path/to/file:1- or /path/to/file:-10.
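
A hypothetical tool call might pass arguments like the following sketch; the paths are made up and only illustrate the range-suffix syntax described above:

```python
# Hypothetical ReadFiles arguments (paths are illustrative only).
arguments = {
    "file_paths": [
        "/home/user/project/main.py",        # whole file
        "/home/user/project/utils.py:1-50",  # lines 1-50 only
        "~/notes/todo.md:-20",               # start of file through line 20
    ]
}
```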

Input Schema

| Name | Required | Description | Default |
| --- | --- | --- | --- |
| file_paths | Yes | Absolute file paths to read, optionally suffixed with a line range (e.g. path:1-10) | |

Implementation Reference

  • Main handler function for the ReadFiles tool. Reads specified files (with optional line ranges), applies token limits, formats content with line numbers, tracks read ranges, and handles truncation.
```python
def read_files(
    file_paths: list[str],
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
    start_line_nums: Optional[list[Optional[int]]] = None,
    end_line_nums: Optional[list[Optional[int]]] = None,
) -> tuple[str, dict[str, list[tuple[int, int]]], bool]:  # Updated to return file paths with ranges
    message = ""
    file_ranges_dict: dict[str, list[tuple[int, int]]] = {}  # Map file paths to line ranges
    workspace_path = context.bash_state.workspace_root
    stats = load_workspace_stats(workspace_path)
    for path_ in file_paths:
        path_ = expand_user(path_)
        if not os.path.isabs(path_):
            continue
        if path_ not in stats.files:
            stats.files[path_] = FileStats()
        stats.files[path_].increment_read()
    save_workspace_stats(workspace_path, stats)

    truncated = False
    for i, file in enumerate(file_paths):
        try:
            # Use line numbers from parameters if provided
            start_line_num = None if start_line_nums is None else start_line_nums[i]
            end_line_num = None if end_line_nums is None else end_line_nums[i]

            # For backward compatibility, we still need to extract line numbers from path
            # if they weren't provided as parameters
            content, truncated, tokens, path, line_range = read_file(
                file,
                coding_max_tokens,
                noncoding_max_tokens,
                context,
                start_line_num,
                end_line_num,
            )
            # Add file path with line range to dictionary
            if path in file_ranges_dict:
                file_ranges_dict[path].append(line_range)
            else:
                file_ranges_dict[path] = [line_range]
        except Exception as e:
            message += f"\n{file}: {str(e)}\n"
            continue

        if coding_max_tokens:
            coding_max_tokens = max(0, coding_max_tokens - tokens)
        if noncoding_max_tokens:
            noncoding_max_tokens = max(0, noncoding_max_tokens - tokens)

        range_formatted = range_format(start_line_num, end_line_num)
        message += (
            f'\n<file-contents-numbered path="{file}{range_formatted}">\n{content}\n'
        )
        if not truncated:
            message += "</file-contents-numbered>"

        # Check if we've hit both token limit
        if (
            truncated
            or (coding_max_tokens is not None and coding_max_tokens <= 0)
            and (noncoding_max_tokens is not None and noncoding_max_tokens <= 0)
        ):
            not_reading = file_paths[i + 1 :]
            if not_reading:
                message += f"\nNot reading the rest of the files: {', '.join(not_reading)} due to token limit, please call again"
            break

    return message, file_ranges_dict, truncated
```
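
For orientation, here is a minimal sketch of the per-file wrapper the handler assembles; the path, range suffix, and contents are made up, and the ":10-12" suffix assumes range_format renders ranges in that form:

```python
# Hypothetical sketch of the per-file wrapper read_files builds.
# Assumes range_format() yields a ":start-end" suffix; all values are illustrative.
file, range_formatted = "/tmp/app.py", ":10-12"
content = "10 x = 1\n11 y = 2\n12 z = x + y\n"
message = f'\n<file-contents-numbered path="{file}{range_formatted}">\n{content}\n'
message += "</file-contents-numbered>"
print(message)
```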
  • Pydantic BaseModel defining the input schema for ReadFiles. Parses line range specifications from file_paths (e.g., 'path:2-4') into private attributes.
```python
class ReadFiles(BaseModel):
    file_paths: list[str]
    _start_line_nums: List[Optional[int]] = PrivateAttr(default_factory=lambda: [])
    _end_line_nums: List[Optional[int]] = PrivateAttr(default_factory=lambda: [])

    @property
    def show_line_numbers_reason(self) -> str:
        return "True"

    @property
    def start_line_nums(self) -> List[Optional[int]]:
        """Get the start line numbers."""
        return self._start_line_nums

    @property
    def end_line_nums(self) -> List[Optional[int]]:
        """Get the end line numbers."""
        return self._end_line_nums

    def model_post_init(self, __context: Any) -> None:
        # Parse file paths for line ranges and store them in private attributes
        self._start_line_nums = []
        self._end_line_nums = []

        # Create new file_paths list without line ranges
        clean_file_paths = []

        for file_path in self.file_paths:
            start_line_num = None
            end_line_num = None
            path_part = file_path

            # Check if the path ends with a line range pattern
            # We're looking for patterns at the very end of the path like:
            # - file.py:10 (specific line)
            # - file.py:10-20 (line range)
            # - file.py:10- (from line 10 to end)
            # - file.py:-20 (from start to line 20)

            # Split by the last colon
            if ":" in file_path:
                parts = file_path.rsplit(":", 1)
                if len(parts) == 2:
                    potential_path = parts[0]
                    line_spec = parts[1]

                    # Check if it's a valid line range format
                    if line_spec.isdigit():
                        # Format: file.py:10
                        try:
                            start_line_num = int(line_spec)
                            path_part = potential_path
                        except ValueError:
                            # Keep the original path if conversion fails
                            pass
                    elif "-" in line_spec:
                        # Could be file.py:10-20, file.py:10-, or file.py:-20
                        line_parts = line_spec.split("-", 1)
                        if not line_parts[0] and line_parts[1].isdigit():
                            # Format: file.py:-20
                            try:
                                end_line_num = int(line_parts[1])
                                path_part = potential_path
                            except ValueError:
                                # Keep original path
                                pass
                        elif line_parts[0].isdigit():
                            # Format: file.py:10-20 or file.py:10-
                            try:
                                start_line_num = int(line_parts[0])
                                if line_parts[1].isdigit():
                                    # file.py:10-20
                                    end_line_num = int(line_parts[1])
                                # In both cases, update the path
                                path_part = potential_path
                            except ValueError:
                                # Keep original path
                                pass

            # Add clean path and corresponding line numbers
            clean_file_paths.append(path_part)
            self._start_line_nums.append(start_line_num)
            self._end_line_nums.append(end_line_num)

        # Update file_paths with clean paths
        self.file_paths = clean_file_paths

        return super().model_post_init(__context)
```
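
A minimal sketch of what this post-init parsing produces, assuming the ReadFiles model above is importable; the paths are hypothetical:

```python
# Sketch: range suffixes are stripped from file_paths and exposed via properties.
req = ReadFiles(file_paths=["/tmp/app.py:10-20", "/tmp/app.py:5", "/tmp/readme.md"])
print(req.file_paths)       # ['/tmp/app.py', '/tmp/app.py', '/tmp/readme.md']
print(req.start_line_nums)  # [10, 5, None]
print(req.end_line_nums)    # [20, None, None]
```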
  • MCP Tool registration in TOOL_PROMPTS list, providing schema, name, description, and annotations. Returned by list_tools().
```python
Tool(
    inputSchema=remove_titles_from_schema(ReadFiles.model_json_schema()),
    name="ReadFiles",
    description="""
- Read full file content of one or more files.
- Provide absolute paths only (~ allowed)
- Only if the task requires line numbers understanding:
    - You may extract a range of lines. E.g., `/path/to/file:1-10` for lines 1-10. You can drop start or end like `/path/to/file:1-` or `/path/to/file:-10`
""",
    annotations=ToolAnnotations(readOnlyHint=True, openWorldHint=False),
),
```
  • Dispatch block in get_tool_output() that recognizes ReadFiles instances and invokes the read_files handler.
```python
elif isinstance(arg, ReadFiles):
    context.console.print("Calling read file tool")

    # Access line numbers through properties
    result, file_ranges_dict, _ = read_files(
        arg.file_paths,
        coding_max_tokens,
        noncoding_max_tokens,
        context,
        arg.start_line_nums,
        arg.end_line_nums,
    )
    output = result, 0.0

    # Merge the new file ranges into our tracking dictionary
    for path, ranges in file_ranges_dict.items():
        if path in file_paths_with_ranges:
            file_paths_with_ranges[path].extend(ranges)
        else:
            file_paths_with_ranges[path] = ranges
elif isinstance(arg, Initialize):
```
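
The merge itself is plain dictionary bookkeeping; a self-contained sketch with made-up paths and ranges:

```python
# Minimal sketch of the range-merging step (paths and ranges are illustrative).
file_paths_with_ranges: dict[str, list[tuple[int, int]]] = {"/tmp/app.py": [(1, 50)]}
file_ranges_dict = {"/tmp/app.py": [(60, 80)], "/tmp/readme.md": [(1, 20)]}

for path, ranges in file_ranges_dict.items():
    if path in file_paths_with_ranges:
        file_paths_with_ranges[path].extend(ranges)
    else:
        file_paths_with_ranges[path] = ranges

print(file_paths_with_ranges)
# {'/tmp/app.py': [(1, 50), (60, 80)], '/tmp/readme.md': [(1, 20)]}
```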
  • Supporting function called by read_files to read individual files, handle line ranges, add line numbers, and apply token-based truncation.
```python
def read_file(
    file_path: str,
    coding_max_tokens: Optional[int],
    noncoding_max_tokens: Optional[int],
    context: Context,
    start_line_num: Optional[int] = None,
    end_line_num: Optional[int] = None,
) -> tuple[str, bool, int, str, tuple[int, int]]:
    context.console.print(f"Reading file: {file_path}")

    show_line_numbers = True
    # Line numbers are now passed as parameters, no need to parse from path

    # Expand the path before checking if it's absolute
    file_path = expand_user(file_path)
    if not os.path.isabs(file_path):
        raise ValueError(
            f"Failure: file_path should be absolute path, current working directory is {context.bash_state.cwd}"
        )

    path = Path(file_path)
    if not path.exists():
        raise ValueError(f"Error: file {file_path} does not exist")

    # Read all lines of the file
    with path.open("r") as f:
        all_lines = f.readlines(10_000_000)
    if all_lines and all_lines[-1].endswith("\n"):
        # Special handling of line counts because readlines doesn't consider last empty line as a separate line
        all_lines.append("")
    total_lines = len(all_lines)

    # Apply line range filtering if specified
    start_idx = 0
    if start_line_num is not None:
        # Convert 1-indexed line number to 0-indexed
        start_idx = max(0, start_line_num - 1)

    end_idx = len(all_lines)
    if end_line_num is not None:
        # end_line_num is inclusive, so we use min to ensure it's within bounds
        end_idx = min(len(all_lines), end_line_num)

    # Convert back to 1-indexed line numbers for tracking
    effective_start = start_line_num if start_line_num is not None else 1
    effective_end = end_line_num if end_line_num is not None else total_lines

    filtered_lines = all_lines[start_idx:end_idx]

    # Create content with or without line numbers
    if show_line_numbers:
        content_lines = []
        for i, line in enumerate(filtered_lines, start=start_idx + 1):
            content_lines.append(f"{i} {line}")
        content = "".join(content_lines)
    else:
        content = "".join(filtered_lines)

    truncated = False
    tokens_counts = 0

    # Select the appropriate max_tokens based on file type
    max_tokens = select_max_tokens(file_path, coding_max_tokens, noncoding_max_tokens)

    # Handle token limit if specified
    if max_tokens is not None:
        tokens = default_enc.encoder(content)
        tokens_counts = len(tokens)
        if len(tokens) > max_tokens:
            # Truncate at token boundary first
            truncated_tokens = tokens[:max_tokens]
            truncated_content = default_enc.decoder(truncated_tokens)

            # Count how many lines we kept
            line_count = truncated_content.count("\n")

            # Calculate the last line number shown (1-indexed)
            last_line_shown = start_idx + line_count

            content = truncated_content
            # Add informative message about truncation with total line count
            total_lines = len(all_lines)
            content += (
                f"\n(...truncated) Only showing till line number {last_line_shown} of {total_lines} total lines due to the token limit, please continue reading from {last_line_shown + 1} if required"
                f" using syntax {file_path}:{last_line_shown + 1}-{total_lines}"
            )
            truncated = True
            # Update effective_end if truncated
            effective_end = last_line_shown

    # Return the content along with the effective line range that was read
    return (
        content,
        truncated,
        tokens_counts,
        file_path,
        (effective_start, effective_end),
    )
```
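
The index arithmetic is the subtle part: requested line numbers are 1-indexed and inclusive, while Python slices are 0-indexed with an exclusive end. A self-contained sketch of the same conversion and numbering format on synthetic data:

```python
# Sketch of read_file's range arithmetic on a synthetic 100-line file.
all_lines = [f"line {n}\n" for n in range(1, 101)]
start_line_num, end_line_num = 10, 20          # request lines 10-20 (1-indexed, inclusive)

start_idx = max(0, start_line_num - 1)         # 9: convert to 0-indexed
end_idx = min(len(all_lines), end_line_num)    # 20: exclusive slice end still keeps line 20

filtered = all_lines[start_idx:end_idx]
numbered = [f"{i} {line}" for i, line in enumerate(filtered, start=start_idx + 1)]
print(numbered[0].rstrip())   # "10 line 10"
print(numbered[-1].rstrip())  # "20 line 20"
```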
