consult_gemini_with_files
Send a query to Gemini with file content to get AI assistance on code or documents. Specify files, working directory, and optional model for tailored responses.
Instructions
Send a query to the Gemini CLI with file context.
Args:
query: Prompt text forwarded to the CLI.
directory: Working directory used for resolving relative file paths.
files: Relative or absolute file paths to include alongside the prompt.
model: Optional model alias (``flash``, ``pro``) or full Gemini model id.
timeout_seconds: Optional per-call timeout override in seconds.
mode: ``"inline"`` streams truncated snippets; ``"at_command"`` emits
``@path`` directives so Gemini CLI resolves files itself.
Returns:
Gemini's response or an explanatory error string with any warnings.

Input Schema
| Name | Required | Description | Default |
|---|---|---|---|
| query | Yes | Prompt text forwarded to the CLI. | |
| directory | Yes | Working directory used for resolving relative file paths. | |
| files | No | Relative or absolute file paths to include alongside the prompt. | |
| model | No | Optional model alias (flash, pro) or full Gemini model id. | |
| timeout_seconds | No | Optional per-call timeout override in seconds. | |
| mode | No | "inline" streams truncated snippets; "at_command" emits @path directives. | inline |
Output Schema
| Name | Required | Description | Default |
|---|---|---|---|
| result | Yes | | |
Implementation Reference
- src/mcp_server.py:430-455 (handler): The MCP tool handler 'consult_gemini_with_files'. Decorated with @mcp.tool(), it validates that the files parameter is provided and delegates to execute_gemini_with_files.
@mcp.tool() def consult_gemini_with_files( query: str, directory: str, files: list[str] | None = None, model: str | None = None, timeout_seconds: int | None = None, mode: str = "inline", ) -> str: """Send a query to the Gemini CLI with file context. Args: query: Prompt text forwarded to the CLI. directory: Working directory used for resolving relative file paths. files: Relative or absolute file paths to include alongside the prompt. model: Optional model alias (``flash``, ``pro``) or full Gemini model id. timeout_seconds: Optional per-call timeout override in seconds. mode: ``"inline"`` streams truncated snippets; ``"at_command"`` emits ``@path`` directives so Gemini CLI resolves files itself. Returns: Gemini's response or an explanatory error string with any warnings. """ if not files: return "Error: files parameter is required for consult_gemini_with_files" return execute_gemini_with_files(query, directory, files, model, timeout_seconds, mode) - src/mcp_server.py:321-406 (handler)Core execution function 'execute_gemini_with_files' that builds the Gemini CLI command, prepares inline or @-command file payloads, runs the subprocess, and returns the response (with any warnings).
def execute_gemini_with_files( query: str, directory: str = ".", files: list[str] | None = None, model: str | None = None, timeout_seconds: int | None = None, mode: str = "inline", ) -> str: """ Execute gemini CLI command with file attachments. Args: query: The prompt to send to Gemini directory: Working directory for the command files: List of file paths to attach (relative to directory) model: Optional model name (flash, pro, etc.) Returns: CLI output or error message """ # Check if gemini CLI is available if not shutil.which("gemini"): return "Error: Gemini CLI not found. Install with: npm install -g @google/gemini-cli" # Validate directory if not Path(directory).is_dir(): return f"Error: Directory does not exist: {directory}" # Validate files parameter if not files: return "Error: No files provided for file attachment mode" # Build command - use stdin for input to avoid hanging selected_model = _normalize_model_name(model) cmd = ["gemini", "-m", selected_model] mode_normalized = mode.lower() warnings: list[str] if mode_normalized not in {"inline", "at_command"}: return f"Error: Unsupported files mode '{mode}'. Use 'inline' or 'at_command'." 
if mode_normalized == "inline": inline_payload, warnings = _prepare_inline_payload(directory, files) stdin_pieces = [piece for piece in [inline_payload, query] if piece] stdin_content = "\n\n".join(stdin_pieces) else: at_prompt, warnings = _prepare_at_command_prompt(directory, files) stdin_pieces = [piece for piece in [at_prompt, query] if piece] stdin_content = "\n\n".join(stdin_pieces) # Execute CLI command - simple timeout, no retries timeout = _coerce_timeout(timeout_seconds) try: result = subprocess.run( cmd, cwd=directory, capture_output=True, text=True, timeout=timeout, input=stdin_content ) if result.returncode == 0: output = result.stdout.strip() if result.stdout.strip() else "No output from Gemini CLI" else: error_msg = result.stderr.strip() # Provide helpful suggestions for common errors if "not available" in error_msg.lower() or "unauthorized" in error_msg.lower(): output = f"Gemini CLI Error: Model '{selected_model}' may not be available for your account. Try: 'flash', 'flash-lite', or 'auto'. Details: {error_msg}" elif "authentication" in error_msg.lower() or "auth" in error_msg.lower(): output = f"Gemini CLI Error: Authentication required. Run: gemini auth login. Details: {error_msg}" else: output = f"Gemini CLI Error: {error_msg}" if warnings: warning_block = "Warnings:\n" + "\n".join(f"- {w}" for w in warnings) return f"{warning_block}\n\n{output}" return output except subprocess.TimeoutExpired: return f"Error: Gemini CLI command timed out after {timeout} seconds. Try increasing timeout or simplifying your query." except FileNotFoundError: return "Error: Gemini CLI not found. Install with: npm install -g @google/gemini-cli" except Exception as e: return f"Error executing Gemini CLI: {str(e)}" - src/mcp_server.py:430-430 (registration)@mcp.tool() decorator registering 'consult_gemini_with_files' as an MCP tool on the FastMCP server.
@mcp.tool()
- src/mcp_server.py:182-233 (helper): Helper '_prepare_inline_payload' reads and truncates file contents for inline mode, respecting configurable size/count limits.
def _prepare_inline_payload(directory: str, files: list[str]) -> tuple[str, list[str]]: """Return stdin payload for inline mode and any warnings.""" warnings: list[str] = [] file_blocks: list[str] = [] total_bytes = 0 processed = 0 if MAX_INLINE_FILE_COUNT <= 0: warnings.append("Inline attachments disabled via MAX_INLINE_FILE_COUNT<=0") return "", warnings for original_path in files: abs_path, rel_path = _resolve_path(directory, original_path) display_name = rel_path or Path(abs_path).name if not Path(abs_path).exists(): warnings.append(f"Skipped missing file: {display_name}") continue if processed >= MAX_INLINE_FILE_COUNT: warnings.append( f"Inline file limit reached ({MAX_INLINE_FILE_COUNT}); skipped remaining attachments", ) break try: content, truncated, bytes_used = _read_file_for_inline(abs_path) except Exception as exc: # IOError or decoding issues warnings.append(f"Error reading {display_name}: {exc}") continue if total_bytes + bytes_used > MAX_INLINE_TOTAL_BYTES: warnings.append( f"Inline payload exceeded {MAX_INLINE_TOTAL_BYTES} bytes; skipped {display_name} and remaining attachments", ) break block_header = f"=== {display_name} ===" if truncated: block_header += "\n[gemini-bridge] Content truncated for inline transfer" file_blocks.append(f"{block_header}\n{content}") if truncated: warnings.append( f"Truncated {display_name}; only the first {INLINE_CHUNK_HEAD_BYTES}B and last {INLINE_CHUNK_TAIL_BYTES}B were sent", ) total_bytes += bytes_used processed += 1 payload = "\n\n".join(file_blocks) return payload, warnings - src/mcp_server.py:236-257 (helper)Helper '_prepare_at_command_prompt' generates @path directives for at_command mode so Gemini CLI resolves files itself.
def _prepare_at_command_prompt(directory: str, files: list[str]) -> tuple[str, list[str]]:
    """Build @-command prompt lines for the Gemini CLI.

    Returns a tuple of (prompt text, warnings). Each resolvable file becomes
    an ``@relative/path`` directive; missing or out-of-tree files are skipped
    with a warning.
    """
    notes: list[str] = []
    directives: list[str] = []
    for requested in files:
        abs_path, rel_path = _resolve_path(directory, requested)
        if not Path(abs_path).exists():
            notes.append(f"Skipped missing file: {requested}")
        elif rel_path is None:
            # @ directives only work for paths inside the working directory.
            notes.append(f"Skipped file outside working directory: {requested}")
        else:
            directives.append(f"@{rel_path}")
    if not directives:
        notes.append("No readable files resolved for @ command; prompt unchanged")
    return "\n".join(directives), notes