| | import gradio as gr |
| | import sys |
| | import os |
| | import tempfile |
| | import shutil |
| | import ast |
| | import time |
| | import subprocess |
| | import re |
| | from typing import List, Dict, Optional, Tuple, Any |
| | from py2puml.py2puml import py2puml |
| | from plantuml import PlantUML |
| | import pyan |
| | from pathlib import Path |
| |
|
| | if os.name == "nt": |
| | graphviz_bin = r"C:\\Program Files\\Graphviz\\bin" |
| | if graphviz_bin not in os.environ["PATH"]: |
| | os.environ["PATH"] += os.pathsep + graphviz_bin |
| |
|
| |
|
def setup_testing_space():
    """Ensure the persistent ``testing_space`` package directory exists.

    Creates the directory (idempotent) and drops an ``__init__.py`` into it
    so the folder is importable as a package by the analysis tools.
    """
    target = os.path.join(os.getcwd(), "testing_space")
    os.makedirs(target, exist_ok=True)

    marker = os.path.join(target, "__init__.py")
    if os.path.exists(marker):
        print("π testing_space directory already exists")
        return

    with open(marker, "w", encoding="utf-8") as fh:
        fh.write("# Testing space for py2puml analysis\n")
    print("π Created testing_space directory and __init__.py")
| |
|
| |
|
def cleanup_testing_space():
    """Delete every ``.py`` file in testing_space except ``__init__.py``.

    If the directory is missing it is recreated via ``setup_testing_space``
    and nothing else is done. Non-``.py`` files are left untouched.
    """
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    if not os.path.exists(testing_dir):
        print("β οΈ testing_space directory not found, creating it...")
        setup_testing_space()
        return

    stale = [
        name
        for name in os.listdir(testing_dir)
        if name.endswith(".py") and name != "__init__.py"
    ]

    removed = 0
    for name in stale:
        try:
            os.remove(os.path.join(testing_dir, name))
            removed += 1
        except Exception as exc:
            # Best-effort cleanup: report and keep going.
            print(f"β οΈ Could not remove {name}: {exc}")

    if removed > 0:
        print(f"π§Ή Cleaned up {removed} leftover .py files from testing_space")
| |
|
| |
|
def verify_testing_space():
    """Return True iff testing_space exists and contains only ``__init__.py``.

    FIX: the original compared ``os.listdir`` output against a list with
    ``==``, which depends on listing order (unspecified by POSIX/Python).
    A set comparison makes the check order-independent and robust if the
    expected file set ever grows.
    """
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    if not os.path.exists(testing_dir):
        return False

    return set(os.listdir(testing_dir)) == {"__init__.py"}
| |
|
| |
|
def generate_call_graph_with_pyan3(
    python_code: str, filename: str = "analysis"
) -> Tuple[Optional[str], Optional[str], Dict[str, Any]]:
    """Generate call graph using pyan3 and return DOT content, PNG path, and structured data.

    Args:
        python_code: The Python code to analyze
        filename: Base filename for temporary files

    Returns:
        Tuple of (dot_content, png_path, structured_data)
    """
    if not python_code.strip():
        return None, None, {}

    # FIX: honor the documented `filename` parameter — it was previously
    # ignored in favor of a hard-coded base name. The millisecond timestamp
    # keeps concurrent analyses from clobbering each other's files.
    timestamp = str(int(time.time() * 1000))
    unique_filename = f"{filename}_{timestamp}"

    # pyan needs a real file on disk; testing_space is the package directory
    # this app maintains for exactly that purpose.
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    code_file = os.path.join(testing_dir, f"{unique_filename}.py")

    try:
        with open(code_file, "w", encoding="utf-8") as f:
            f.write(python_code)

        print(f"π Generating call graph for: {unique_filename}.py")

        try:
            # Build the call graph in DOT format via the pyan library API.
            dot_content = pyan.create_callgraph(
                filenames=[str(code_file)],
                format="dot",
                colored=True,
                grouped=True,
                annotated=True,
            )

            png_path = None
            with tempfile.TemporaryDirectory() as temp_dir:
                dot_file = os.path.join(temp_dir, f"{unique_filename}.dot")
                temp_png = os.path.join(temp_dir, f"{unique_filename}.png")

                with open(dot_file, "w", encoding="utf-8") as f:
                    f.write(dot_content)

                # Render DOT -> PNG with the Graphviz CLI (argv list, no shell).
                dot_cmd = ["dot", "-Tpng", dot_file, "-o", temp_png]

                try:
                    subprocess.run(dot_cmd, check=True, timeout=30)

                    if os.path.exists(temp_png):
                        # Copy out of the TemporaryDirectory before it is
                        # deleted on context exit.
                        permanent_dir = os.path.join(os.getcwd(), "temp_diagrams")
                        os.makedirs(permanent_dir, exist_ok=True)
                        png_path = os.path.join(
                            permanent_dir, f"callgraph_{unique_filename}.png"
                        )
                        shutil.copy2(temp_png, png_path)
                        print(f"π¨ Call graph PNG saved: {os.path.basename(png_path)}")

                except subprocess.SubprocessError as e:
                    # PNG rendering is best-effort: the DOT text and parsed
                    # data are still returned below even if Graphviz fails.
                    print(f"β οΈ Graphviz PNG generation failed: {e}")

            structured_data = parse_call_graph_data(dot_content)

            return dot_content, png_path, structured_data

        except subprocess.TimeoutExpired:
            print("β οΈ pyan3 analysis timed out, trying simplified approach...")
            return try_fallback_analysis(python_code, unique_filename)
        except subprocess.SubprocessError as e:
            print(f"β οΈ pyan3 execution failed: {e}, trying fallback...")
            return try_fallback_analysis(python_code, unique_filename)

    except Exception as e:
        print(f"β Call graph generation error: {e}")
        return None, None, {"error": str(e)}

    finally:
        # Always remove the snippet so testing_space stays clean for the
        # next analysis, even when generation failed above.
        if os.path.exists(code_file):
            try:
                os.remove(code_file)
                print(f"π§Ή Cleaned up analysis file: {unique_filename}.py")
            except Exception as e:
                print(f"β οΈ Could not remove analysis file: {e}")
| |
|
| |
|
def parse_call_graph_data(dot_content: str) -> Dict[str, Any]:
    """Parse pyan3 DOT output into structured function call data.

    Args:
        dot_content: DOT format string from pyan3

    Returns:
        Dictionary with nodes, edges, totals, the caller->callees map and
        per-function call metrics; empty dict for empty input, or
        ``{"parse_error": ...}`` if parsing blows up.
    """
    if not dot_content:
        return {}

    try:
        # Node declarations look like:  "name" [attrs...]
        nodes = re.findall(r'"([^"]+)"\s*\[', dot_content)
        # Edges look like:  "caller" -> "callee"
        edges = re.findall(r'"([^"]+)"\s*->\s*"([^"]+)"', dot_content)

        # Build forward (caller -> callees) and reverse (callee -> callers)
        # adjacency maps in a single pass over the edges.
        calls_out: Dict[str, list] = {}
        calls_in: Dict[str, list] = {}
        for src, dst in edges:
            calls_out.setdefault(src, []).append(dst)
            calls_in.setdefault(dst, []).append(src)

        function_metrics = {
            name: {
                "calls_made": len(calls_out.get(name, [])),
                "called_by_count": len(calls_in.get(name, [])),
                "calls_to": calls_out.get(name, []),
                "called_by": calls_in.get(name, []),
            }
            for name in nodes
        }

        return {
            "nodes": nodes,
            "edges": edges,
            "total_functions": len(nodes),
            "total_calls": len(edges),
            "call_graph": calls_out,
            "function_metrics": function_metrics,
        }

    except Exception as e:
        return {"parse_error": str(e)}
| |
|
| |
|
def try_fallback_analysis(
    python_code: str, unique_filename: str
) -> Tuple[Optional[str], Optional[str], Dict[str, Any]]:
    """Fallback analysis when pyan3 fails - basic function call detection.

    Args:
        python_code: The Python code to analyze
        unique_filename: Unique filename for this analysis (kept for interface
            parity with the pyan3 path; unused here)

    Returns:
        Tuple of (None, None, fallback_analysis_data)
    """
    print("π Using fallback analysis approach...")

    try:
        module = ast.parse(python_code)
        found = [
            node.name
            for node in ast.walk(module)
            if isinstance(node, ast.FunctionDef)
        ]

        # A function counts as "called" when its name followed by an open
        # paren appears anywhere in the source (this also matches the def
        # line itself, so the count is an upper bound).
        detected_calls = [
            ("unknown", name)
            for name in found
            if re.search(rf"\b{re.escape(name)}\s*\(", python_code)
        ]

        return (
            None,
            None,
            {
                "fallback": True,
                "functions_detected": found,
                "total_functions": len(found),
                "total_calls": len(detected_calls),
                "info": f"Fallback analysis: detected {len(found)} functions",
                "function_metrics": {
                    name: {
                        "calls_made": 0,
                        "called_by_count": 0,
                        "calls_to": [],
                        "called_by": [],
                    }
                    for name in found
                },
            },
        )

    except Exception as e:
        return None, None, {"error": f"Fallback analysis also failed: {str(e)}"}
| |
|
| |
|
def analyze_function_complexity(python_code: str) -> Dict[str, Any]:
    """Analyze function complexity using AST.

    Args:
        python_code: The Python code to analyze

    Returns:
        Mapping of function name -> metrics (cyclomatic complexity, lines of
        code, parameters, docstring flag, line span); empty dict for blank
        input, ``{"error": ...}`` on parse failure.
    """
    if not python_code.strip():
        return {}

    # Each of these node types adds one decision point to the cyclomatic
    # complexity score.
    branching_nodes = (
        ast.If,
        ast.While,
        ast.For,
        ast.Try,
        ast.ExceptHandler,
        ast.With,
        ast.Assert,
    )

    try:
        results: Dict[str, Any] = {}

        for node in ast.walk(ast.parse(python_code)):
            if not isinstance(node, ast.FunctionDef):
                continue

            score = 1  # base complexity of a straight-line function
            for inner in ast.walk(node):
                if isinstance(inner, branching_nodes):
                    score += 1
                elif isinstance(inner, ast.BoolOp):
                    # and/or chains: n operands contribute n-1 decision points
                    score += len(inner.values) - 1

            span = (
                node.end_lineno - node.lineno + 1
                if hasattr(node, "end_lineno")
                else 0
            )

            arg_names = [arg.arg for arg in node.args.args]

            # A docstring is a bare string constant as the first statement.
            body = node.body
            documented = bool(
                body
                and isinstance(body[0], ast.Expr)
                and isinstance(body[0].value, ast.Constant)
                and isinstance(body[0].value.value, str)
            )

            results[node.name] = {
                "complexity": score,
                "lines_of_code": span,
                "parameter_count": len(arg_names),
                "parameters": arg_names,
                "has_docstring": documented,
                "line_start": node.lineno,
                "line_end": getattr(node, "end_lineno", node.lineno),
            }

        return results

    except Exception as e:
        return {"error": str(e)}
| |
|
| |
|
def generate_diagram(python_code: str, filename: str = "diagram") -> Optional[str]:
    """Generate a UML class diagram from Python code.

    Args:
        python_code: The Python code to analyze and convert to UML
        filename: Optional name for the generated diagram file

    Returns:
        Path to the generated PNG diagram image or None if failed
    """
    if not python_code.strip():
        return None

    print(f"π Processing code for diagram generation...")

    # Start from a clean testing_space so py2puml only sees this snippet.
    cleanup_testing_space()

    if not verify_testing_space():
        print("β οΈ testing_space verification failed, recreating...")
        setup_testing_space()
        cleanup_testing_space()

    # FIX: honor the `filename` parameter — it was previously ignored in
    # favor of a hard-coded base name. Timestamp keeps runs distinct.
    timestamp = str(int(time.time() * 1000))
    unique_filename = f"{filename}_{timestamp}"

    testing_dir = os.path.join(os.getcwd(), "testing_space")
    code_file = os.path.join(testing_dir, f"{unique_filename}.py")

    # Remote PlantUML renderer (requires network access).
    server = PlantUML(url="http://www.plantuml.com/plantuml/img/")

    try:
        with open(code_file, "w", encoding="utf-8") as f:
            f.write(python_code)

        print(f"π Created temporary file: testing_space/{unique_filename}.py")

        print(f"π Generating PlantUML content...")
        # py2puml takes (path-without-extension, dotted module name).
        puml_content_lines = py2puml(
            os.path.join(testing_dir, unique_filename),
            f"testing_space.{unique_filename}",
        )
        puml_content = "".join(puml_content_lines)

        if not puml_content.strip():
            print("β οΈ No UML content generated - check if your code contains classes")
            return None

        with tempfile.TemporaryDirectory() as temp_dir:
            puml_file = os.path.join(temp_dir, f"{unique_filename}.puml")
            with open(puml_file, "w", encoding="utf-8") as f:
                f.write(puml_content)

            print(f"π¨ Rendering diagram...")
            output_png = os.path.join(temp_dir, f"{unique_filename}.png")
            server.processes_file(puml_file, outfile=output_png)

            if os.path.exists(output_png):
                print("✅ Diagram generated successfully!")
                # Copy out of the TemporaryDirectory before it is deleted.
                permanent_dir = os.path.join(os.getcwd(), "temp_diagrams")
                os.makedirs(permanent_dir, exist_ok=True)
                # FIX: use the caller-supplied name here too (was hard-coded).
                # NOTE(review): str hash is randomized per process, so this
                # suffix is not stable across restarts — acceptable for a
                # scratch cache name, but confirm nothing relies on it.
                permanent_path = os.path.join(
                    permanent_dir, f"{filename}_{hash(python_code) % 10000}.png"
                )
                shutil.copy2(output_png, permanent_path)
                return permanent_path
            else:
                print("β Failed to generate PNG")
                return None

    except Exception as e:
        print(f"β Error: {e}")
        return None

    finally:
        # Always remove the snippet so testing_space stays clean.
        if os.path.exists(code_file):
            try:
                os.remove(code_file)
                print(f"π§Ή Cleaned up temporary file: {unique_filename}.py")
            except Exception as e:
                print(f"β οΈ Could not remove temporary file: {e}")
| |
|
| |
|
def analyze_code_structure(python_code: str) -> str:
    """Enhanced code analysis combining AST + pyan3 call graphs.

    Args:
        python_code: The Python code to analyze

    Returns:
        Comprehensive analysis report in markdown format
    """
    if not python_code.strip():
        return "No code provided for analysis."

    try:
        # --- Pass 1: walk the AST collecting classes, top-level functions
        # and imports.
        tree = ast.parse(python_code)
        classes = []    # list of {name, methods, attributes, parents} dicts
        functions = []  # names of functions not defined inside a class body
        imports = []    # dotted import names (may contain duplicates)

        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                methods = []
                attributes = []

                # Only direct children of the class body count: methods and
                # simple `name = value` class-level assignments.
                for item in node.body:
                    if isinstance(item, ast.FunctionDef):
                        methods.append(item.name)
                    elif isinstance(item, ast.Assign):
                        for target in item.targets:
                            if isinstance(target, ast.Name):
                                attributes.append(target.id)

                # Only plain-name bases are captured; attribute bases such as
                # `module.Base` are skipped.
                parents = [base.id for base in node.bases if isinstance(base, ast.Name)]

                classes.append(
                    {
                        "name": node.name,
                        "methods": methods,
                        "attributes": attributes,
                        "parents": parents,
                    }
                )

            elif isinstance(node, ast.FunctionDef):
                # A function is a method iff it appears directly in some
                # class body. NOTE(review): this re-walks the whole tree per
                # function (O(n^2)) and treats nested functions as
                # standalone — acceptable for snippet-sized input.
                is_method = any(
                    isinstance(parent, ast.ClassDef)
                    for parent in ast.walk(tree)
                    if hasattr(parent, "body") and node in getattr(parent, "body", [])
                )
                if not is_method:
                    functions.append(node.name)

            elif isinstance(node, (ast.Import, ast.ImportFrom)):
                if isinstance(node, ast.Import):
                    for alias in node.names:
                        imports.append(alias.name)
                else:
                    # `from X import Y` is recorded as "X.Y"; a relative
                    # `from . import Y` (module is None) as just "Y".
                    module = node.module or ""
                    for alias in node.names:
                        imports.append(
                            f"{module}.{alias.name}" if module else alias.name
                        )

        # --- Pass 2: per-function complexity metrics via the AST helper.
        function_complexity = analyze_function_complexity(python_code)

        # --- Pass 3: call-graph data via pyan3 (only worth running when
        # there is at least one function or class to graph).
        call_graph_data = {}
        if functions or any(classes):
            try:
                cleanup_testing_space()
                dot_content, png_path, call_graph_data = generate_call_graph_with_pyan3(
                    python_code
                )
            except Exception as e:
                print(f"β οΈ Call graph analysis failed: {e}")
                call_graph_data = {"error": str(e)}

        # --- Build the markdown report, section by section.
        summary = "π **Enhanced Code Analysis Results**\n\n"

        # Overview section: headline counts.
        summary += "## π **Overview**\n"
        summary += f"β’ **{len(classes)}** classes found\n"
        summary += f"β’ **{len(functions)}** standalone functions found\n"
        summary += f"β’ **{len(set(imports))}** unique imports\n"

        if call_graph_data and "total_functions" in call_graph_data:
            summary += f"β’ **{call_graph_data['total_functions']}** total functions/methods in call graph\n"
            summary += (
                f"β’ **{call_graph_data['total_calls']}** function calls detected\n"
            )

        summary += "\n"

        # Classes section: inheritance, methods, attributes per class.
        if classes:
            summary += "## ποΈ **Classes**\n"
            for cls in classes:
                summary += f"### **{cls['name']}**\n"
                if cls["parents"]:
                    summary += f" - **Inherits from**: {', '.join(cls['parents'])}\n"
                summary += f" - **Methods**: {len(cls['methods'])}"
                if cls["methods"]:
                    summary += f" ({', '.join(cls['methods'])})"
                summary += "\n"
                if cls["attributes"]:
                    summary += f" - **Attributes**: {', '.join(cls['attributes'])}\n"
                summary += "\n"

        # Standalone-functions section: complexity + call-graph metrics.
        if functions:
            summary += "## βοΈ **Standalone Functions**\n"
            for func in functions:
                summary += f"### **{func}()**\n"

                if func in function_complexity:
                    metrics = function_complexity[func]
                    summary += (
                        f" - **Complexity**: {metrics['complexity']} (cyclomatic)\n"
                    )
                    summary += f" - **Lines of Code**: {metrics['lines_of_code']}\n"
                    summary += f" - **Parameters**: {metrics['parameter_count']}"
                    if metrics["parameters"]:
                        summary += f" ({', '.join(metrics['parameters'])})"
                    summary += "\n"
                    summary += f" - **Has Docstring**: {'✅' if metrics['has_docstring'] else 'β'}\n"
                    summary += f" - **Lines**: {metrics['line_start']}-{metrics['line_end']}\n"

                if call_graph_data and "function_metrics" in call_graph_data:
                    if func in call_graph_data["function_metrics"]:
                        call_metrics = call_graph_data["function_metrics"][func]
                        summary += f" - **Calls Made**: {call_metrics['calls_made']}\n"
                        if call_metrics["calls_to"]:
                            summary += (
                                f" - Calls: {', '.join(call_metrics['calls_to'])}\n"
                            )
                        summary += f" - **Called By**: {call_metrics['called_by_count']} functions\n"
                        if call_metrics["called_by"]:
                            summary += f" - Called by: {', '.join(call_metrics['called_by'])}\n"

                summary += "\n"

        # Call-analysis section: top-5 most-called / most-calling functions.
        if (
            call_graph_data
            and "function_metrics" in call_graph_data
            and call_graph_data["total_calls"] > 0
        ):
            summary += "## π **Function Call Analysis**\n"

            sorted_by_calls = sorted(
                call_graph_data["function_metrics"].items(),
                key=lambda x: x[1]["called_by_count"],
                reverse=True,
            )[:5]

            if sorted_by_calls and sorted_by_calls[0][1]["called_by_count"] > 0:
                summary += "**Most Called Functions:**\n"
                for func_name, metrics in sorted_by_calls:
                    if metrics["called_by_count"] > 0:
                        summary += f"β’ **{func_name}**: called {metrics['called_by_count']} times\n"
                summary += "\n"

            sorted_by_complexity = sorted(
                call_graph_data["function_metrics"].items(),
                key=lambda x: x[1]["calls_made"],
                reverse=True,
            )[:5]

            if sorted_by_complexity and sorted_by_complexity[0][1]["calls_made"] > 0:
                summary += "**Functions Making Most Calls:**\n"
                for func_name, metrics in sorted_by_complexity:
                    if metrics["calls_made"] > 0:
                        summary += (
                            f"β’ **{func_name}**: makes {metrics['calls_made']} calls\n"
                        )
                summary += "\n"

        # Complexity section: top-5 plus aggregate averages.
        if function_complexity:
            summary += "## π **Complexity Analysis**\n"

            sorted_complexity = sorted(
                function_complexity.items(),
                key=lambda x: x[1]["complexity"],
                reverse=True,
            )[:5]

            summary += "**Most Complex Functions:**\n"
            for func_name, metrics in sorted_complexity:
                summary += f"β’ **{func_name}**: complexity {metrics['complexity']}, {metrics['lines_of_code']} lines\n"

            # NOTE(review): if analyze_function_complexity returned
            # {"error": ...} these aggregates would KeyError; upstream parse
            # errors are caught by the outer except below.
            total_functions = len(function_complexity)
            avg_complexity = (
                sum(m["complexity"] for m in function_complexity.values())
                / total_functions
            )
            avg_lines = (
                sum(m["lines_of_code"] for m in function_complexity.values())
                / total_functions
            )
            functions_with_docs = sum(
                1 for m in function_complexity.values() if m["has_docstring"]
            )

            summary += "\n**Overall Function Metrics:**\n"
            summary += f"β’ **Average Complexity**: {avg_complexity:.1f}\n"
            summary += f"β’ **Average Lines per Function**: {avg_lines:.1f}\n"
            summary += f"β’ **Functions with Docstrings**: {functions_with_docs}/{total_functions} ({100*functions_with_docs/total_functions:.1f}%)\n"
            summary += "\n"

        # Imports section: first 10 unique imports (set order is arbitrary).
        if imports:
            summary += "## π¦ **Imports**\n"
            unique_imports = list(set(imports))
            for imp in unique_imports[:10]:
                summary += f"β’ {imp}\n"
            if len(unique_imports) > 10:
                summary += f"β’ ... and {len(unique_imports) - 10} more\n"
            summary += "\n"

        # Call-graph status: surface a pyan3 error or fallback info line.
        if call_graph_data and "error" in call_graph_data:
            summary += "## β οΈ **Call Graph Analysis**\n"
            summary += f"Call graph generation failed: {call_graph_data['error']}\n\n"
        elif call_graph_data and "info" in call_graph_data:
            summary += "## π **Call Graph Analysis**\n"
            summary += f"{call_graph_data['info']}\n\n"

        # Recommendations: heuristics over the collected metrics.
        summary += "## π‘ **Recommendations**\n"
        if function_complexity:
            high_complexity = [
                f for f, m in function_complexity.items() if m["complexity"] > 10
            ]
            if high_complexity:
                summary += f"β’ Consider refactoring high-complexity functions: {', '.join(high_complexity)}\n"

            no_docs = [
                f for f, m in function_complexity.items() if not m["has_docstring"]
            ]
            if no_docs:
                summary += f"β’ Add docstrings to: {', '.join(no_docs[:5])}{'...' if len(no_docs) > 5 else ''}\n"

        if call_graph_data and "function_metrics" in call_graph_data:
            # Functions that neither call nor are called by anything.
            isolated_functions = [
                f
                for f, m in call_graph_data["function_metrics"].items()
                if m["calls_made"] == 0 and m["called_by_count"] == 0
            ]
            if isolated_functions:
                summary += f"β’ Review isolated functions: {', '.join(isolated_functions[:3])}{'...' if len(isolated_functions) > 3 else ''}\n"

        return summary

    except SyntaxError as e:
        return f"β **Syntax Error in Python code:**\n```\n{str(e)}\n```"
    except Exception as e:
        return f"β **Error analyzing code:**\n```\n{str(e)}\n```"
| |
|
| |
|
def list_example_files() -> list:
    """List all example .py files in the examples/ directory."""
    examples_dir = os.path.join(os.getcwd(), "examples")
    candidates = os.listdir(examples_dir) if os.path.exists(examples_dir) else []
    return [name for name in candidates if name.endswith(".py")]
| |
|
| |
|
def get_sample_code(filename: str) -> str:
    """Return sample Python code read from the examples/ directory.

    Raises the usual OSError family if the file does not exist.
    """
    sample_path = os.path.join(os.getcwd(), "examples", filename)
    with open(sample_path, "r", encoding="utf-8") as handle:
        return handle.read()
| |
|
| |
|
def generate_all_diagrams(python_code: str, filename: str = "diagram") -> Tuple[Optional[str], Optional[str], str]:
    """Generate all diagrams and analysis at once.

    Args:
        python_code: The Python code to analyze
        filename: Base filename for diagrams

    Returns:
        Tuple of (uml_diagram_path, call_graph_path, analysis_text)
    """
    if not python_code.strip():
        return None, None, "No code provided for analysis."

    print("π Starting comprehensive diagram generation...")

    # Step 1: UML class diagram (None when the code has no classes or
    # rendering fails — the UI then just shows an empty image slot).
    print("π Step 1/3: Generating UML class diagram...")
    uml_diagram_path = generate_diagram(python_code, filename)

    # Step 2: function call graph.
    # FIX: pre-initialize call_graph_path so a failure anywhere in the try
    # block (including cleanup_testing_space) cannot leave it unbound.
    print("π Step 2/3: Generating call graph...")
    call_graph_path = None
    try:
        cleanup_testing_space()
        _dot_content, call_graph_path, _structured_data = generate_call_graph_with_pyan3(
            python_code
        )
    except Exception as e:
        print(f"β οΈ Call graph generation failed: {e}")

    # Step 3: textual analysis report (always produces a string).
    print("π Step 3/3: Performing code analysis...")
    analysis_text = analyze_code_structure(python_code)

    print("✅ All diagrams and analysis completed!")

    return uml_diagram_path, call_graph_path, analysis_text
| |
|
| |
|
| | |
# --- Gradio UI definition -------------------------------------------------
# Builds the two-tab web app; `demo` is launched (with MCP server support)
# from the __main__ guard at the bottom of the file.
with gr.Blocks(
    title="Python UML Diagram Generator & MCP Server",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {
        max-width: 1400px !important;
    }
    .code-input {
        font-family: 'Courier New', monospace !important;
    }
    """,
) as demo:
    # Page header / intro text.
    gr.Markdown(
        """
        # π Python UML Diagram Generator & MCP Server

        **Dual Functionality:**
        - π₯οΈ **Web Interface**: Generate UML class diagrams and call graphs from Python code
        - π€ **MCP Server**: Provides tools for AI assistants (Claude Desktop, Cursor, etc.)

        Transform your Python code into comprehensive visual diagrams and analysis!
        """
    )

    # Main tab: code input on the left, generated diagrams on the right,
    # textual analysis below.
    with gr.Tab("π¨ Diagram Generator"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input")

                # Pre-populated dropdown of example snippets from examples/.
                example_files = list_example_files()
                example_dropdown = gr.Dropdown(
                    label="Choose Example",
                    choices=example_files,
                    value=example_files[0] if example_files else None,
                )

                # Main code editor, seeded with the first example if any.
                code_input = gr.Textbox(
                    label="Python Code",
                    placeholder="Paste your Python code here...",
                    lines=20,
                    max_lines=35,
                    value=get_sample_code(example_files[0]) if example_files else "",
                    elem_classes=["code-input"],
                )

                with gr.Row():
                    filename_input = gr.Textbox(
                        label="Diagram Name",
                        value="my_diagram",
                        placeholder="Enter a name for your diagram",
                        scale=2,
                    )

                with gr.Row():
                    generate_diagrams_btn = gr.Button(
                        "π Generate Diagrams", variant="primary", size="lg"
                    )

            with gr.Column(scale=1):
                gr.Markdown("### Generated UML Class Diagram")

                uml_diagram_output = gr.Image(
                    label="UML Class Diagram",
                    show_download_button=True,
                    height=300,
                )

                gr.Markdown("### Generated Call Graph Diagram")

                call_graph_output = gr.Image(
                    label="Function Call Graph",
                    show_download_button=True,
                    height=300,
                )

        with gr.Row():
            gr.Markdown("### Code Analysis")

        with gr.Row():
            # Markdown report produced by analyze_code_structure().
            analysis_output = gr.Textbox(
                label="Comprehensive Code Analysis",
                lines=15,
                max_lines=25,
                interactive=False,
                show_copy_button=True,
            )

    # Static help tab: usage notes plus MCP client configuration snippets.
    with gr.Tab("βΉοΈ About & Help"):
        gr.Markdown(
            """
            ## About This Tool

            This Python UML Diagram Generator helps you visualize the structure of your Python code by creating comprehensive diagrams and analysis.
            ### Inspiration:
            The idea for this mcp server was inspired by a tweet made by karpathy [tweet](https://x.com/karpathy/status/1930305209747812559).
            He makes the point that generated images are easy to discriminate by humans while going through a 300 line LLM generated code is time consuming.
            This tool aims to provide a visual quick smell test for generated code so that user can quickly identify issues instead of going through the code line by line.
            This is only a very rough and basic implementation of the idea.
            Making compound AI systems instead of text-to-text chatbots is the necessary direction.

            ### β¨ Features:
            - **UML Class Diagrams**: Automatically identifies classes, methods, attributes, and inheritance
            - **Call Graph Diagrams**: Visualizes function dependencies and call relationships
            - **Code Analysis**: Provides detailed structure analysis with complexity metrics
            - **MCP Integration**: Works with AI assistants via Model Context Protocol

            ### π How to Use:
            1. **Paste Code**: Enter your Python code in the text area
            2. **Set Name**: Choose a name for your diagrams (optional)
            3. **Generate**: Click "Generate Diagrams" to create all visualizations and analysis
            4. **Download**: Save the generated diagram images
            5. **Review**: Read the comprehensive code analysis

            ### π§ Technical Details:
            - Built with **Gradio** for the web interface
            - Uses **py2puml** for Python-to-PlantUML conversion
            - **PlantUML** for UML diagram rendering
            - **pyan3** and **Graphviz** for call graph generation
            - **AST** (Abstract Syntax Tree) for code analysis

            ### π‘ Tips:
            - Include type hints for better diagram quality
            - Use meaningful class and method names
            - Keep inheritance hierarchies clear
            - Add docstrings for better understanding
            - Works great with both class-based and function-based code

            ### π Troubleshooting:
            - **No UML diagram generated**: Check if your code contains class definitions
            - **No call graph generated**: Ensure your code has function definitions and calls
            - **Syntax errors**: Ensure your Python code is valid
            - **Import errors**: Stick to standard library imports for best results

            ## Model Context Protocol (MCP) Server

            This application automatically serves as an MCP server for AI assistants!

            ### π For Hugging Face Spaces (Public):
            ```json
            {
              "mcpServers": {
                "python-diagram-generator": {
                  "url": "https://your-username-space-name.hf.space/gradio_api/mcp/sse"
                }
              }
            }
            ```

            ### π For Local Development:
            ```json
            {
              "mcpServers": {
                "python-diagram-generator": {
                  "command": "npx",
                  "args": [
                    "mcp-remote",
                    "http://127.0.0.1:7860/gradio_api/mcp/sse",
                    "--transport",
                    "sse-only"
                  ]
                }
              }
            }
            ```

            ### π For Private Spaces:
            ```json
            {
              "mcpServers": {
                "python-diagram-generator": {
                  "url": "https://your-username-space-name.hf.space/gradio_api/mcp/sse",
                  "headers": {
                    "Authorization": "Bearer hf_your_token_here"
                  }
                }
              }
            }
            ```

            ### π Setup Instructions:
            1. Install Node.js and mcp-remote: `npm install -g mcp-remote`
            2. Add the configuration above to your MCP client
            3. Restart your MCP client (e.g., Claude Desktop)
            4. Test with prompts like: "Generate a UML diagram for this Python code: [your code]"

            ---

            **Local MCP Endpoint**: `http://127.0.0.1:7860/gradio_api/mcp/sse`
            **MCP Schema**: View at `/gradio_api/mcp/schema`

            ### π Future Features:
            - Logic flowcharts
            - Data flow diagrams
            - State machine diagrams
            - Multi-file analysis
            - Enhanced UML features
            """
        )

    # Event wiring -----------------------------------------------------------

    # Selecting an example loads its source into the editor.
    def load_example(example_filename):
        return get_sample_code(example_filename)

    example_dropdown.change(
        fn=load_example,
        inputs=example_dropdown,
        outputs=code_input,
    )

    # Button: run the full pipeline (UML + call graph + analysis).
    generate_diagrams_btn.click(
        fn=generate_all_diagrams,
        inputs=[code_input, filename_input],
        outputs=[uml_diagram_output, call_graph_output, analysis_output],
        show_progress=True,
    )

    # Live analysis: re-run the (diagram-free) structural analysis on every
    # edit of the code box.
    code_input.change(
        fn=analyze_code_structure,
        inputs=code_input,
        outputs=analysis_output,
        show_progress=False,
    )
| |
|
| | |
| | if __name__ == "__main__": |
| | |
| | setup_testing_space() |
| |
|
| | demo.launch( |
| | mcp_server=True, |
| | show_api=True, |
| | show_error=True, |
| | share=True, |
| | |
| | ) |
| |
|