
Documentation Search MCP Server

by gemini2026

scan_project_dependencies

Scan project dependencies declared in pyproject.toml, requirements.txt, or package.json to identify known security vulnerabilities and generate a comprehensive security report.

Instructions

Scans project dependencies from files like pyproject.toml or requirements.txt for vulnerabilities.

Args:
    project_path: The path to the project directory (defaults to current directory).

Returns:
    A comprehensive security report of all project dependencies.

Input Schema

Name            Required    Description                              Default
project_path    No          The path to the project directory.       .
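
For orientation, here is a minimal sketch of invoking this tool over stdio with the official MCP Python SDK. The launch command and module name are assumptions; substitute however this server is actually installed and started.

    import asyncio

    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client

    async def main() -> None:
        # The command below is an assumption; point it at however this server is launched locally.
        params = StdioServerParameters(
            command="python", args=["-m", "documentation_search_mcp"]
        )
        async with stdio_client(params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                # project_path is the single optional argument from the input schema above.
                result = await session.call_tool(
                    "scan_project_dependencies", {"project_path": "."}
                )
                print(result.content)

    asyncio.run(main())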

Implementation Reference

  • The main handler function for the 'scan_project_dependencies' tool, decorated with @mcp.tool() for registration. It orchestrates dependency parsing and vulnerability scanning.
    async def scan_project_dependencies(project_path: str = "."):
        """
        Scans project dependencies from files like pyproject.toml or requirements.txt for vulnerabilities.

        Args:
            project_path: The path to the project directory (defaults to current directory).

        Returns:
            A comprehensive security report of all project dependencies.
        """
        from .vulnerability_scanner import vulnerability_scanner
        from .project_scanner import find_and_parse_dependencies

        parsed_info = find_and_parse_dependencies(project_path)
        if not parsed_info:
            return {
                "error": "No dependency file found.",
                "message": "Supported files are pyproject.toml, requirements.txt, or package.json.",
            }

        filename, ecosystem, dependencies = parsed_info
        if not dependencies:
            return {
                "summary": {
                    "dependency_file": filename,
                    "ecosystem": ecosystem,
                    "total_dependencies": 0,
                    "vulnerable_count": 0,
                    "overall_project_risk": "None",
                    "message": "No dependencies found to scan.",
                },
                "vulnerable_packages": [],
            }

        total_deps = len(dependencies)
        logger.debug(
            "Found %s dependencies in %s. Scanning for vulnerabilities...",
            total_deps,
            filename,
        )

        scan_tasks = [
            vulnerability_scanner.scan_library(name, ecosystem)
            for name in dependencies.keys()
        ]
        results = await asyncio.gather(*scan_tasks, return_exceptions=True)

        vulnerable_deps = []
        for i, report_item in enumerate(results):
            dep_name = list(dependencies.keys())[i]
            if isinstance(report_item, Exception):
                # Could log this error
                continue
            else:
                report = report_item

            if report.vulnerabilities:  # type: ignore
                vulnerable_deps.append(
                    {
                        "library": dep_name,
                        "version": dependencies[dep_name],
                        "vulnerability_count": report.total_vulnerabilities,  # type: ignore
                        "security_score": report.security_score,
                        "summary": (
                            report.recommendations[0]
                            if report.recommendations
                            else "Update to the latest version."
                        ),
                        "details": [
                            vuln.to_dict() for vuln in report.vulnerabilities[:3]
                        ],  # Top 3
                    }
                )

        vulnerable_deps.sort(key=lambda x: x["security_score"])

        return {
            "summary": {
                "dependency_file": filename,
                "ecosystem": ecosystem,
                "total_dependencies": total_deps,
                "vulnerable_count": len(vulnerable_deps),
                "overall_project_risk": (
                    "High"
                    if any(d["security_score"] < 50 for d in vulnerable_deps)
                    else (
                        "Medium"
                        if any(d["security_score"] < 70 for d in vulnerable_deps)
                        else "Low"
                    )
                ),
            },
            "vulnerable_packages": vulnerable_deps,
        }
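
    For orientation, the report returned by this handler has roughly the shape sketched below; the file name, counts, scores, and package name are purely illustrative.

    {
        "summary": {
            "dependency_file": "requirements.txt",
            "ecosystem": "PyPI",
            "total_dependencies": 12,
            "vulnerable_count": 1,
            # "High" if any score < 50, "Medium" if any score < 70, otherwise "Low"
            "overall_project_risk": "Medium",
        },
        "vulnerable_packages": [
            {
                "library": "example-package",  # hypothetical package name
                "version": ">=1.0",
                "vulnerability_count": 2,
                "security_score": 63,
                "summary": "Update to the latest version.",
                "details": [],  # up to three vulnerability dicts from vuln.to_dict()
            }
        ],
    }
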
  • Helper function that locates and parses dependency files (pyproject.toml, requirements.txt, package.json) to extract package names and versions.
    def find_and_parse_dependencies(
        directory: str,
    ) -> Optional[Tuple[str, str, Dict[str, str]]]:
        """
        Finds and parses the most relevant dependency file in a directory.

        Returns:
            A tuple of (file_path, ecosystem, dependencies_dict) or None.
        """
        supported_files = {
            "pyproject.toml": ("PyPI", parse_pyproject_toml),
            "requirements.txt": ("PyPI", parse_requirements_txt),
            "package.json": ("npm", parse_package_json),
        }

        for filename, (ecosystem, parser_func) in supported_files.items():
            file_path = os.path.join(directory, filename)
            if os.path.exists(file_path):
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()
                    dependencies = parser_func(content)
                    return filename, ecosystem, dependencies
                except Exception as e:
                    print(f"⚠️ Error parsing {filename}: {e}", file=sys.stderr)
                    # Continue to the next file type if parsing fails
                    continue
        return None
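
    The concrete parser helpers (parse_pyproject_toml, parse_requirements_txt, parse_package_json) are not reproduced here. As a rough illustration of the contract they satisfy, a minimal requirements.txt parser returning the expected {name: version_spec} mapping might look like the sketch below; it is not the server's actual implementation.

    import re
    from typing import Dict

    def parse_requirements_txt(content: str) -> Dict[str, str]:
        """Illustrative only: map each requirement line to {name: version_spec}."""
        deps: Dict[str, str] = {}
        for line in content.splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and comments
            # Split the package name from its version specifier, e.g. "fastapi>=0.110".
            match = re.match(r"^([A-Za-z0-9_.\-\[\],]+)\s*(.*)$", line)
            if match:
                deps[match.group(1)] = match.group(2).strip() or "*"
        return deps
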
  • Core helper method in VulnerabilityScanner class that performs vulnerability scans on individual libraries using multiple sources (OSV, GitHub, Safety DB), called by the main handler for each dependency.
    async def scan_library(
        self, library_name: str, ecosystem: str = "PyPI"
    ) -> SecurityReport:
        """
        Comprehensive vulnerability scan for a library

        Args:
            library_name: Name of the library (e.g., "fastapi", "react")
            ecosystem: Package ecosystem ("PyPI", "npm", "Maven", etc.)

        Returns:
            SecurityReport with vulnerability details
        """
        cache_key = f"{library_name}_{ecosystem}"

        # Check cache first
        if self._is_cached(cache_key):
            return self.cache[cache_key]["data"]

        vulnerabilities = []

        # Scan multiple sources in parallel
        scan_tasks = [
            self._scan_osv(library_name, ecosystem),
            self._scan_github_advisories(library_name, ecosystem),
            (
                self._scan_safety_db(library_name)
                if ecosystem.lower() == "pypi"
                else self._empty_scan()
            ),
        ]

        try:
            results = await asyncio.gather(*scan_tasks, return_exceptions=True)
            for result in results:
                if isinstance(result, list):
                    vulnerabilities.extend(result)
                elif isinstance(result, Exception):
                    print(f"Scan error: {result}", file=sys.stderr)
        except Exception as e:
            print(f"Vulnerability scan failed for {library_name}: {e}", file=sys.stderr)

        # Generate security report
        report = self._generate_security_report(
            library_name, ecosystem, vulnerabilities
        )

        # Cache the result
        self._cache_result(cache_key, report)

        return report
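
    scan_library can also be exercised directly, which is handy when debugging a single dependency. The import path below is an assumption (the handler above imports the singleton as `from .vulnerability_scanner import vulnerability_scanner`); the report fields used are the ones the handler itself reads.

    import asyncio

    from vulnerability_scanner import vulnerability_scanner  # import path assumed

    async def main() -> None:
        report = await vulnerability_scanner.scan_library("fastapi", "PyPI")
        print(report.security_score, report.total_vulnerabilities)
        for vuln in report.vulnerabilities[:3]:
            print(vuln.to_dict())

    asyncio.run(main())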
