analyze_cache.py
import ast


def analyze_file(filepath):
    with open(filepath, 'r', encoding='utf-8') as f:
        content = f.read()

    try:
        tree = ast.parse(content)

        class ComplexityAnalyzer(ast.NodeVisitor):
            def __init__(self):
                self.complexity_score = 0
                self.issues = []
                self.function_count = 0
                self.class_count = 0

            def visit_FunctionDef(self, node):
                self.function_count += 1

                # Cyclomatic complexity: 1 plus one per branching construct
                complexity = 1
                for child in ast.walk(node):
                    if isinstance(child, (ast.If, ast.For, ast.While, ast.With, ast.ExceptHandler)):
                        complexity += 1
                if complexity > 5:
                    self.issues.append(f'Function {node.name} has high cyclomatic complexity: {complexity}')

                # Function length (end_lineno is available on Python 3.8+)
                if getattr(node, 'end_lineno', None):
                    func_lines = node.end_lineno - node.lineno + 1
                    if func_lines > 30:
                        self.issues.append(f'Function {node.name} is too long: {func_lines} lines')

                # Parameter count, including *args / **kwargs
                args_count = len(node.args.args) + len(node.args.kwonlyargs)
                if node.args.vararg:
                    args_count += 1
                if node.args.kwarg:
                    args_count += 1
                if args_count > 5:
                    self.issues.append(f'Function {node.name} has too many parameters: {args_count}')

                self.complexity_score += complexity
                self.generic_visit(node)

            def visit_ClassDef(self, node):
                self.class_count += 1

                # Class size: number of methods
                methods = [n for n in node.body if isinstance(n, ast.FunctionDef)]
                if len(methods) > 15:
                    self.issues.append(f'Class {node.name} has too many methods: {len(methods)}')

                # Class size: number of lines
                if getattr(node, 'end_lineno', None):
                    class_lines = node.end_lineno - node.lineno + 1
                    if class_lines > 200:
                        self.issues.append(f'Class {node.name} is too long: {class_lines} lines')

                self.generic_visit(node)

        analyzer = ComplexityAnalyzer()
        analyzer.visit(tree)

        # Summary statistics
        total_lines = len(content.splitlines())
        print(f'File: {filepath}')
        print(f'Total lines: {total_lines}')
        print(f'Functions: {analyzer.function_count}')
        print(f'Classes: {analyzer.class_count}')
        print(f'Complexity score: {analyzer.complexity_score}')
        print(f'Issues found: {len(analyzer.issues)}')

        # Group issues by the name of the offending function or class
        # (the second word of each message)
        issues_by_name = {}
        for issue in analyzer.issues:
            name = issue.split()[1]
            issues_by_name.setdefault(name, []).append(issue)

        for name, issues in issues_by_name.items():
            print(f'\nIssues for {name} ({len(issues)}):')
            for issue in issues:
                print(f'  - {issue}')

        return len(analyzer.issues)
    except Exception as e:
        print(f'Analysis failed: {e}')
        return 0


if __name__ == "__main__":
    file_path = "src/data_access/cache.py"
    issue_count = analyze_file(file_path)
    print(f'\nSummary: found {issue_count} potential issue(s)')
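The script above hard-codes a single target file. As a minimal sketch (not part of the original script), analyze_file() can also be reused to scan a whole source tree; the src/ path and the analyze_cache module name are assumptions for illustration and may need adjusting.

# Sketch: run the analyzer over every Python file under src/.
# Assumes analyze_cache.py is importable from the working directory
# and that a src/ directory exists.
from pathlib import Path

from analyze_cache import analyze_file

if __name__ == "__main__":
    total_issues = 0
    for py_file in sorted(Path("src").rglob("*.py")):
        total_issues += analyze_file(py_file)
        print("-" * 60)
    print(f"\nTotal potential issues across the tree: {total_issues}")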
