import gradio as gr
import sys
import os
import tempfile
import shutil
import ast
import time
import subprocess
import re
from typing import List, Dict, Optional, Tuple, Any
from py2puml.py2puml import py2puml
from plantuml import PlantUML
import pyan
from pathlib import Path

# On Windows, make sure the Graphviz `dot` executable is reachable on PATH.
if os.name == "nt":  # nt == Windows
    # NOTE: in a raw string, backslashes must be single; r"C:\\..." would put
    # literal doubled backslashes into PATH.
    graphviz_bin = r"C:\Program Files\Graphviz\bin"
    if graphviz_bin not in os.environ.get("PATH", ""):
        os.environ["PATH"] = os.environ.get("PATH", "") + os.pathsep + graphviz_bin


def setup_testing_space():
    """Create persistent testing_space directory and __init__.py at startup."""
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    os.makedirs(testing_dir, exist_ok=True)
    init_file = os.path.join(testing_dir, "__init__.py")
    if not os.path.exists(init_file):
        with open(init_file, "w", encoding="utf-8") as f:
            f.write("# Testing space for py2puml analysis\n")
        print("๐Ÿ“ Created testing_space directory and __init__.py")
    else:
        print("๐Ÿ”„ testing_space directory already exists")


def cleanup_testing_space():
    """Remove all .py files except __init__.py from testing_space."""
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    if not os.path.exists(testing_dir):
        print("โš ๏ธ testing_space directory not found, creating it...")
        setup_testing_space()
        return

    # Clean up any leftover .py files (keep only __init__.py)
    files_removed = 0
    for file in os.listdir(testing_dir):
        if file.endswith(".py") and file != "__init__.py":
            file_path = os.path.join(testing_dir, file)
            try:
                os.remove(file_path)
                files_removed += 1
            except Exception as e:
                print(f"โš ๏ธ Could not remove {file}: {e}")

    if files_removed > 0:
        print(f"๐Ÿงน Cleaned up {files_removed} leftover .py files from testing_space")


def verify_testing_space():
    """Verify testing_space contains only __init__.py."""
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    if not os.path.exists(testing_dir):
        return False
    files = os.listdir(testing_dir)
    expected_files = ["__init__.py"]
    return files == expected_files


def _sanitize_base_name(filename: str, default: str) -> str:
    """Turn a user-supplied diagram name into a safe Python identifier.

    The name ends up both in file names and (for py2puml) in a dotted module
    name, so it must contain only identifier characters and must not start
    with a digit.
    """
    safe_name = re.sub(r"\W+", "_", filename or "").strip("_") or default
    if safe_name[0].isdigit():
        safe_name = f"_{safe_name}"
    return safe_name


def generate_call_graph_with_pyan3(
    python_code: str, filename: str = "analysis"
) -> Tuple[Optional[str], Optional[str], Dict[str, Any]]:
    """Generate call graph using pyan3 and return DOT content, PNG path, and structured data.

    Args:
        python_code: The Python code to analyze
        filename: Base filename for temporary files

    Returns:
        Tuple of (dot_content, png_path, structured_data)
    """
    if not python_code.strip():
        return None, None, {}

    # Create unique filename using timestamp; previously the `filename`
    # argument was ignored entirely.
    timestamp = str(int(time.time() * 1000))
    safe_name = _sanitize_base_name(filename, "analysis")
    unique_filename = f"{safe_name}_{timestamp}"

    # Paths (make sure the working directory exists before writing into it)
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    os.makedirs(testing_dir, exist_ok=True)
    code_file = os.path.join(testing_dir, f"{unique_filename}.py")

    try:
        # Write Python code to file
        with open(code_file, "w", encoding="utf-8") as f:
            f.write(python_code)
        print(f"๐Ÿ“Š Generating call graph for: {unique_filename}.py")

        try:
            dot_content = pyan.create_callgraph(
                filenames=[str(code_file)],
                format="dot",
                colored=True,
                grouped=True,
                annotated=True,
            )

            png_path = None
            with tempfile.TemporaryDirectory() as temp_dir:
                dot_file = os.path.join(temp_dir, f"{unique_filename}.dot")
                temp_png = os.path.join(temp_dir, f"{unique_filename}.png")

                # Write DOT content to file
                with open(dot_file, "w", encoding="utf-8") as f:
                    f.write(dot_content)

                # Generate PNG using dot command
                dot_cmd = ["dot", "-Tpng", dot_file, "-o", temp_png]
                try:
                    subprocess.run(dot_cmd, check=True, timeout=30)
                    if os.path.exists(temp_png):
                        # Copy to permanent location so Gradio can serve it
                        permanent_dir = os.path.join(os.getcwd(), "temp_diagrams")
                        os.makedirs(permanent_dir, exist_ok=True)
                        png_path = os.path.join(
                            permanent_dir, f"callgraph_{unique_filename}.png"
                        )
                        shutil.copy2(temp_png, png_path)
                        print(f"๐ŸŽจ Call graph PNG saved: {os.path.basename(png_path)}")
                except subprocess.SubprocessError as e:
                    # TimeoutExpired is a SubprocessError subclass, so the 30 s
                    # timeout lands here too.  Continue without PNG; the DOT
                    # content is still useful.
                    print(f"โš ๏ธ Graphviz PNG generation failed: {e}")

            # Parse DOT content for structured data
            structured_data = parse_call_graph_data(dot_content)
            return dot_content, png_path, structured_data

        except subprocess.TimeoutExpired:
            print("โš ๏ธ pyan3 analysis timed out, trying simplified approach...")
            return try_fallback_analysis(python_code, unique_filename)
        except subprocess.SubprocessError as e:
            print(f"โš ๏ธ pyan3 execution failed: {e}, trying fallback...")
            return try_fallback_analysis(python_code, unique_filename)

    except Exception as e:
        print(f"โŒ Call graph generation error: {e}")
        return None, None, {"error": str(e)}
    finally:
        # Clean up temporary file
        if os.path.exists(code_file):
            try:
                os.remove(code_file)
                print(f"๐Ÿงน Cleaned up analysis file: {unique_filename}.py")
            except Exception as e:
                print(f"โš ๏ธ Could not remove analysis file: {e}")


def parse_call_graph_data(dot_content: str) -> Dict[str, Any]:
    """Parse pyan3 DOT output into structured function call data.

    Args:
        dot_content: DOT format string from pyan3

    Returns:
        Dictionary with parsed call graph information
    """
    if not dot_content:
        return {}

    try:
        # Extract nodes (functions/classes).  The pattern also matches the
        # attribute bracket on edge lines ('"a" -> "b" [...]'), so de-duplicate
        # while preserving first-seen order to avoid over-counting functions.
        node_pattern = r'"([^"]+)"\s*\['
        nodes = list(dict.fromkeys(re.findall(node_pattern, dot_content)))

        # Extract edges (function calls)
        edge_pattern = r'"([^"]+)"\s*->\s*"([^"]+)"'
        edges = re.findall(edge_pattern, dot_content)

        # Build function call mapping (caller -> callees, callee -> callers)
        call_graph = {}
        called_by = {}
        for caller, callee in edges:
            call_graph.setdefault(caller, []).append(callee)
            called_by.setdefault(callee, []).append(caller)

        # Calculate per-node fan-in / fan-out metrics
        function_metrics = {}
        for node in nodes:
            out_degree = len(call_graph.get(node, []))
            in_degree = len(called_by.get(node, []))
            function_metrics[node] = {
                "calls_made": out_degree,
                "called_by_count": in_degree,
                "calls_to": call_graph.get(node, []),
                "called_by": called_by.get(node, []),
            }

        return {
            "nodes": nodes,
            "edges": edges,
            "total_functions": len(nodes),
            "total_calls": len(edges),
            "call_graph": call_graph,
            "function_metrics": function_metrics,
        }
    except Exception as e:
        return {"parse_error": str(e)}


def try_fallback_analysis(
    python_code: str, unique_filename: str
) -> Tuple[Optional[str], Optional[str], Dict[str, Any]]:
    """Fallback analysis when pyan3 fails - basic function call detection.

    Args:
        python_code: The Python code to analyze
        unique_filename: Unique filename for this analysis

    Returns:
        Tuple of (None, None, fallback_analysis_data)
    """
    print("๐Ÿ”„ Using fallback analysis approach...")

    try:
        # `ast` and `re` are already imported at module level.
        tree = ast.parse(python_code)
        functions = []
        calls = []

        # Extract function definitions
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                functions.append(node.name)

        # Simple regex-based call detection (fallback approach)
        for func in functions:
            # Look for calls to this function
            pattern = rf"\b{re.escape(func)}\s*\("
            if re.search(pattern, python_code):
                calls.append(("unknown", func))

        return (
            None,
            None,
            {
                "fallback": True,
                "functions_detected": functions,
                "total_functions": len(functions),
                "total_calls": len(calls),
                "info": f"Fallback analysis: detected {len(functions)} functions",
                "function_metrics": {
                    func: {
                        "calls_made": 0,
                        "called_by_count": 0,
                        "calls_to": [],
                        "called_by": [],
                    }
                    for func in functions
                },
            },
        )

    except Exception as e:
        return None, None, {"error": f"Fallback analysis also failed: {str(e)}"}


def analyze_function_complexity(python_code: str) -> Dict[str, Any]:
    """Analyze function complexity using AST.

    Args:
        python_code: The Python code to analyze

    Returns:
        Dictionary with function complexity metrics
    """
    if not python_code.strip():
        return {}

    try:
        tree = ast.parse(python_code)
        function_analysis = {}

        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                # Calculate cyclomatic complexity (simplified)
                complexity = 1  # Base complexity
                for child in ast.walk(node):
                    if isinstance(
                        child,
                        (
                            ast.If,
                            ast.While,
                            ast.For,
                            ast.Try,
                            ast.ExceptHandler,
                            ast.With,
                            ast.Assert,
                        ),
                    ):
                        complexity += 1
                    elif isinstance(child, ast.BoolOp):
                        complexity += len(child.values) - 1

                # Count lines of code.  end_lineno can be None on some nodes,
                # so a plain hasattr() check is not enough.
                end_lineno = getattr(node, "end_lineno", None)
                lines = end_lineno - node.lineno + 1 if end_lineno else 0

                # Extract parameters
                params = [arg.arg for arg in node.args.args]

                # Check for docstring
                has_docstring = (
                    len(node.body) > 0
                    and isinstance(node.body[0], ast.Expr)
                    and isinstance(node.body[0].value, ast.Constant)
                    and isinstance(node.body[0].value.value, str)
                )

                function_analysis[node.name] = {
                    "complexity": complexity,
                    "lines_of_code": lines,
                    "parameter_count": len(params),
                    "parameters": params,
                    "has_docstring": has_docstring,
                    "line_start": node.lineno,
                    "line_end": end_lineno or node.lineno,
                }

        return function_analysis

    except Exception as e:
        return {"error": str(e)}


def generate_diagram(python_code: str, filename: str = "diagram") -> Optional[str]:
    """Generate a UML class diagram from Python code.

    Args:
        python_code: The Python code to analyze and convert to UML
        filename: Optional name for the generated diagram file

    Returns:
        Path to the generated PNG diagram image or None if failed
    """
    if not python_code.strip():
        return None

    print("๐Ÿ”„ Processing code for diagram generation...")

    # Clean testing space (ensure only __init__.py exists)
    cleanup_testing_space()

    # Verify clean state
    if not verify_testing_space():
        print("โš ๏ธ testing_space verification failed, recreating...")
        setup_testing_space()
        cleanup_testing_space()

    # Create unique filename using timestamp.  The user-supplied name is
    # sanitized because it becomes part of the dotted module name passed to
    # py2puml below (previously the `filename` argument was ignored).
    timestamp = str(int(time.time() * 1000))  # millisecond timestamp
    safe_name = _sanitize_base_name(filename, "diagram")
    unique_filename = f"{safe_name}_{timestamp}"

    # Paths
    testing_dir = os.path.join(os.getcwd(), "testing_space")
    code_file = os.path.join(testing_dir, f"{unique_filename}.py")

    # Use PlantUML web service for rendering
    server = PlantUML(url="http://www.plantuml.com/plantuml/img/")

    try:
        # Write Python code to file in testing_space
        with open(code_file, "w", encoding="utf-8") as f:
            f.write(python_code)
        print(f"๐Ÿ“ Created temporary file: testing_space/{unique_filename}.py")

        # Generate PlantUML content using py2puml (no sys.path manipulation needed)
        print("๐Ÿ“ Generating PlantUML content...")
        puml_content_lines = py2puml(
            # path to the .py file (without extension)
            os.path.join(testing_dir, unique_filename),
            # module name
            f"testing_space.{unique_filename}",
        )
        puml_content = "".join(puml_content_lines)

        if not puml_content.strip():
            print("โš ๏ธ No UML content generated - check if your code contains classes")
            return None

        # Create temporary directory for PlantUML processing
        with tempfile.TemporaryDirectory() as temp_dir:
            # Save PUML file
            puml_file = os.path.join(temp_dir, f"{unique_filename}.puml")
            with open(puml_file, "w", encoding="utf-8") as f:
                f.write(puml_content)

            print("๐ŸŽจ Rendering diagram...")
            # Generate PNG
            output_png = os.path.join(temp_dir, f"{unique_filename}.png")
            server.processes_file(puml_file, outfile=output_png)

            if os.path.exists(output_png):
                print("โœ… Diagram generated successfully!")
                # Copy to a permanent location for Gradio to serve
                permanent_dir = os.path.join(os.getcwd(), "temp_diagrams")
                os.makedirs(permanent_dir, exist_ok=True)
                permanent_path = os.path.join(
                    permanent_dir, f"{safe_name}_{hash(python_code) % 10000}.png"
                )
                shutil.copy2(output_png, permanent_path)
                return permanent_path
            else:
                print("โŒ Failed to generate PNG")
                return None

    except Exception as e:
        print(f"โŒ Error: {e}")
        return None
    finally:
        # Always clean up the temporary .py file
        if os.path.exists(code_file):
            try:
                os.remove(code_file)
                print(f"๐Ÿงน Cleaned up temporary file: {unique_filename}.py")
            except Exception as e:
                print(f"โš ๏ธ Could not remove temporary file: {e}")


def analyze_code_structure(python_code: str) -> str:
    """Enhanced code analysis combining AST + pyan3 call graphs.

    Args:
        python_code: The Python code to analyze

    Returns:
        Comprehensive analysis report in markdown format
    """
    if not python_code.strip():
        return "No code provided for analysis."

    try:
        # Basic AST analysis
        tree = ast.parse(python_code)

        classes = []
        functions = []
        imports = []

        # Pre-compute the FunctionDefs that are direct class-body members
        # (methods) so the walk below stays O(n); the previous implementation
        # re-walked the whole tree for every FunctionDef.
        method_ids = {
            id(item)
            for cls_node in ast.walk(tree)
            if isinstance(cls_node, ast.ClassDef)
            for item in cls_node.body
            if isinstance(item, ast.FunctionDef)
        }

        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                methods = []
                attributes = []
                for item in node.body:
                    if isinstance(item, ast.FunctionDef):
                        methods.append(item.name)
                    elif isinstance(item, ast.Assign):
                        for target in item.targets:
                            if isinstance(target, ast.Name):
                                attributes.append(target.id)

                # Check for inheritance (simple-name bases only)
                parents = [base.id for base in node.bases if isinstance(base, ast.Name)]

                classes.append(
                    {
                        "name": node.name,
                        "methods": methods,
                        "attributes": attributes,
                        "parents": parents,
                    }
                )
            elif isinstance(node, ast.FunctionDef):
                # Count it as standalone unless it is a direct class-body member
                if id(node) not in method_ids:
                    functions.append(node.name)
            elif isinstance(node, (ast.Import, ast.ImportFrom)):
                if isinstance(node, ast.Import):
                    for alias in node.names:
                        imports.append(alias.name)
                else:
                    module = node.module or ""
                    for alias in node.names:
                        imports.append(
                            f"{module}.{alias.name}" if module else alias.name
                        )

        # Enhanced function complexity analysis
        function_complexity = analyze_function_complexity(python_code)
        # An {"error": ...} dict must not be iterated as metrics below.
        complexity_ok = bool(function_complexity) and "error" not in function_complexity

        # Call graph analysis (for files with functions)
        call_graph_data = {}
        if functions or any(classes):  # Only run if there are functions to analyze
            try:
                cleanup_testing_space()  # Ensure clean state
                dot_content, png_path, call_graph_data = generate_call_graph_with_pyan3(
                    python_code
                )
            except Exception as e:
                print(f"โš ๏ธ Call graph analysis failed: {e}")
                call_graph_data = {"error": str(e)}

        # Build comprehensive summary
        summary = "๐Ÿ“Š **Enhanced Code Analysis Results**\n\n"

        # === OVERVIEW SECTION ===
        summary += "## ๐Ÿ“‹ **Overview**\n"
        summary += f"โ€ข **{len(classes)}** classes found\n"
        summary += f"โ€ข **{len(functions)}** standalone functions found\n"
        summary += f"โ€ข **{len(set(imports))}** unique imports\n"
        if call_graph_data and "total_functions" in call_graph_data:
            summary += f"โ€ข **{call_graph_data['total_functions']}** total functions/methods in call graph\n"
            summary += (
                f"โ€ข **{call_graph_data['total_calls']}** function calls detected\n"
            )
        summary += "\n"

        # === CLASSES SECTION ===
        if classes:
            summary += "## ๐Ÿ—๏ธ **Classes**\n"
            for cls in classes:
                summary += f"### **{cls['name']}**\n"
                if cls["parents"]:
                    summary += f" - **Inherits from**: {', '.join(cls['parents'])}\n"
                summary += f" - **Methods**: {len(cls['methods'])}"
                if cls["methods"]:
                    summary += f" ({', '.join(cls['methods'])})"
                summary += "\n"
                if cls["attributes"]:
                    summary += f" - **Attributes**: {', '.join(cls['attributes'])}\n"
                summary += "\n"

        # === STANDALONE FUNCTIONS SECTION ===
        if functions:
            summary += "## โš™๏ธ **Standalone Functions**\n"
            for func in functions:
                summary += f"### **{func}()**\n"

                # Add complexity metrics if available
                if complexity_ok and func in function_complexity:
                    metrics = function_complexity[func]
                    summary += (
                        f" - **Complexity**: {metrics['complexity']} (cyclomatic)\n"
                    )
                    summary += f" - **Lines of Code**: {metrics['lines_of_code']}\n"
                    summary += f" - **Parameters**: {metrics['parameter_count']}"
                    if metrics["parameters"]:
                        summary += f" ({', '.join(metrics['parameters'])})"
                    summary += "\n"
                    summary += f" - **Has Docstring**: {'โœ…' if metrics['has_docstring'] else 'โŒ'}\n"
                    summary += f" - **Lines**: {metrics['line_start']}-{metrics['line_end']}\n"

                # Add call graph info if available
                if call_graph_data and "function_metrics" in call_graph_data:
                    if func in call_graph_data["function_metrics"]:
                        call_metrics = call_graph_data["function_metrics"][func]
                        summary += f" - **Calls Made**: {call_metrics['calls_made']}\n"
                        if call_metrics["calls_to"]:
                            summary += (
                                f" - Calls: {', '.join(call_metrics['calls_to'])}\n"
                            )
                        summary += f" - **Called By**: {call_metrics['called_by_count']} functions\n"
                        if call_metrics["called_by"]:
                            summary += f" - Called by: {', '.join(call_metrics['called_by'])}\n"

                summary += "\n"

        # === CALL GRAPH ANALYSIS ===
        if (
            call_graph_data
            and "function_metrics" in call_graph_data
            and call_graph_data["total_calls"] > 0
        ):
            summary += "## ๐Ÿ”— **Function Call Analysis**\n"

            # Most called functions
            sorted_by_calls = sorted(
                call_graph_data["function_metrics"].items(),
                key=lambda x: x[1]["called_by_count"],
                reverse=True,
            )[:5]

            if sorted_by_calls and sorted_by_calls[0][1]["called_by_count"] > 0:
                summary += "**Most Called Functions:**\n"
                for func_name, metrics in sorted_by_calls:
                    if metrics["called_by_count"] > 0:
                        summary += f"โ€ข **{func_name}**: called {metrics['called_by_count']} times\n"
                summary += "\n"

            # Most complex functions (by calls made)
            sorted_by_complexity = sorted(
                call_graph_data["function_metrics"].items(),
                key=lambda x: x[1]["calls_made"],
                reverse=True,
            )[:5]

            if sorted_by_complexity and sorted_by_complexity[0][1]["calls_made"] > 0:
                summary += "**Functions Making Most Calls:**\n"
                for func_name, metrics in sorted_by_complexity:
                    if metrics["calls_made"] > 0:
                        summary += (
                            f"โ€ข **{func_name}**: makes {metrics['calls_made']} calls\n"
                        )
                summary += "\n"

        # === COMPLEXITY ANALYSIS ===
        if complexity_ok:
            summary += "## ๐Ÿ“ˆ **Complexity Analysis**\n"

            # Sort by complexity
            sorted_complexity = sorted(
                function_complexity.items(),
                key=lambda x: x[1]["complexity"],
                reverse=True,
            )[:5]

            summary += "**Most Complex Functions:**\n"
            for func_name, metrics in sorted_complexity:
                summary += f"โ€ข **{func_name}**: complexity {metrics['complexity']}, {metrics['lines_of_code']} lines\n"

            # Overall stats
            total_functions = len(function_complexity)
            avg_complexity = (
                sum(m["complexity"] for m in function_complexity.values())
                / total_functions
            )
            avg_lines = (
                sum(m["lines_of_code"] for m in function_complexity.values())
                / total_functions
            )
            functions_with_docs = sum(
                1 for m in function_complexity.values() if m["has_docstring"]
            )

            summary += "\n**Overall Function Metrics:**\n"
            summary += f"โ€ข **Average Complexity**: {avg_complexity:.1f}\n"
            summary += f"โ€ข **Average Lines per Function**: {avg_lines:.1f}\n"
            summary += f"โ€ข **Functions with Docstrings**: {functions_with_docs}/{total_functions} ({100*functions_with_docs/total_functions:.1f}%)\n"
            summary += "\n"

        # === IMPORTS SECTION ===
        if imports:
            summary += "## ๐Ÿ“ฆ **Imports**\n"
            # sorted() makes the displayed subset deterministic run-to-run
            unique_imports = sorted(set(imports))
            for imp in unique_imports[:10]:  # Show first 10 imports
                summary += f"โ€ข {imp}\n"
            if len(unique_imports) > 10:
                summary += f"โ€ข ... and {len(unique_imports) - 10} more\n"
            summary += "\n"

        # === CALL GRAPH ERROR/INFO ===
        if call_graph_data and "error" in call_graph_data:
            summary += "## โš ๏ธ **Call Graph Analysis**\n"
            summary += f"Call graph generation failed: {call_graph_data['error']}\n\n"
        elif call_graph_data and "info" in call_graph_data:
            summary += "## ๐Ÿ“Š **Call Graph Analysis**\n"
            summary += f"{call_graph_data['info']}\n\n"

        # === RECOMMENDATIONS ===
        summary += "## ๐Ÿ’ก **Recommendations**\n"
        if complexity_ok:
            high_complexity = [
                f for f, m in function_complexity.items() if m["complexity"] > 10
            ]
            if high_complexity:
                summary += f"โ€ข Consider refactoring high-complexity functions: {', '.join(high_complexity)}\n"

            no_docs = [
                f for f, m in function_complexity.items() if not m["has_docstring"]
            ]
            if no_docs:
                summary += f"โ€ข Add docstrings to: {', '.join(no_docs[:5])}{'...' if len(no_docs) > 5 else ''}\n"

        if call_graph_data and "function_metrics" in call_graph_data:
            isolated_functions = [
                f
                for f, m in call_graph_data["function_metrics"].items()
                if m["calls_made"] == 0 and m["called_by_count"] == 0
            ]
            if isolated_functions:
                summary += f"โ€ข Review isolated functions: {', '.join(isolated_functions[:3])}{'...' if len(isolated_functions) > 3 else ''}\n"

        return summary

    except SyntaxError as e:
        return f"โŒ **Syntax Error in Python code:**\n```\n{str(e)}\n```"
    except Exception as e:
        return f"โŒ **Error analyzing code:**\n```\n{str(e)}\n```"


def list_example_files() -> list:
    """List all example .py files in the examples/ directory."""
    examples_dir = os.path.join(os.getcwd(), "examples")
    if not os.path.exists(examples_dir):
        return []
    return [f for f in os.listdir(examples_dir) if f.endswith(".py")]


def get_sample_code(filename: str) -> str:
    """Return sample Python code from examples/ directory."""
    examples_dir = os.path.join(os.getcwd(), "examples")
    file_path = os.path.join(examples_dir, filename)
    # Guard against a missing/renamed example instead of crashing the UI
    if not os.path.isfile(file_path):
        return f"# Example file not found: {filename}"
    with open(file_path, "r", encoding="utf-8") as f:
        return f.read()


def generate_all_diagrams(
    python_code: str, filename: str = "diagram"
) -> Tuple[Optional[str], Optional[str], str]:
    """Generate all diagrams and analysis at once.

    Args:
        python_code: The Python code to analyze
        filename: Base filename for diagrams

    Returns:
        Tuple of (uml_diagram_path, call_graph_path, analysis_text)
    """
    if not python_code.strip():
        return None, None, "No code provided for analysis."

    print("๐Ÿš€ Starting comprehensive diagram generation...")

    # Step 1: Generate UML Class Diagram
    print("๐Ÿ“Š Step 1/3: Generating UML class diagram...")
    uml_diagram_path = generate_diagram(python_code, filename)

    # Step 2: Generate Call Graph (only the PNG path is used here)
    print("๐Ÿ”— Step 2/3: Generating call graph...")
    try:
        cleanup_testing_space()
        _dot, call_graph_path, _data = generate_call_graph_with_pyan3(python_code)
    except Exception as e:
        print(f"โš ๏ธ Call graph generation failed: {e}")
        call_graph_path = None

    # Step 3: Generate Analysis
    print("๐Ÿ“ˆ Step 3/3: Performing code analysis...")
    analysis_text = analyze_code_structure(python_code)

    print("โœ… All diagrams and analysis completed!")
    return uml_diagram_path, call_graph_path, analysis_text


# Create Gradio interface
with gr.Blocks(
    title="Python UML Diagram Generator & MCP Server",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container { max-width: 1400px !important; }
    .code-input { font-family: 'Courier New', monospace !important; }
    """,
) as demo:
    # Header
    gr.Markdown(
        """
        # ๐Ÿ Python UML Diagram Generator & MCP Server

        **Dual Functionality:**
        - ๐Ÿ–ฅ๏ธ **Web Interface**: Generate UML class diagrams and call graphs from Python code
        - ๐Ÿค– **MCP Server**: Provides tools for AI assistants (Claude Desktop, Cursor, etc.)

        Transform your Python code into comprehensive visual diagrams and analysis!
        """
    )

    with gr.Tab("๐ŸŽจ Diagram Generator"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Input")

                example_files = list_example_files()
                example_dropdown = gr.Dropdown(
                    label="Choose Example",
                    choices=example_files,
                    value=example_files[0] if example_files else None,
                )

                code_input = gr.Textbox(
                    label="Python Code",
                    placeholder="Paste your Python code here...",
                    lines=20,
                    max_lines=35,
                    value=get_sample_code(example_files[0]) if example_files else "",
                    elem_classes=["code-input"],
                )

                with gr.Row():
                    filename_input = gr.Textbox(
                        label="Diagram Name",
                        value="my_diagram",
                        placeholder="Enter a name for your diagram",
                        scale=2,
                    )

                with gr.Row():
                    generate_diagrams_btn = gr.Button(
                        "๐Ÿ”„ Generate Diagrams", variant="primary", size="lg"
                    )

            with gr.Column(scale=1):
                gr.Markdown("### Generated UML Class Diagram")
                uml_diagram_output = gr.Image(
                    label="UML Class Diagram",
                    show_download_button=True,
                    height=300,
                )

                gr.Markdown("### Generated Call Graph Diagram")
                call_graph_output = gr.Image(
                    label="Function Call Graph",
                    show_download_button=True,
                    height=300,
                )

        with gr.Row():
            gr.Markdown("### Code Analysis")
        with gr.Row():
            analysis_output = gr.Textbox(
                label="Comprehensive Code Analysis",
                lines=15,
                max_lines=25,
                interactive=False,
                show_copy_button=True,
            )

    with gr.Tab("โ„น๏ธ About & Help"):
        gr.Markdown(
            """
            ## About This Tool

            This Python UML Diagram Generator helps you visualize the structure of your Python code by creating comprehensive diagrams and analysis.

            ### Inspiration:
            The idea for this mcp server was inspired by a tweet made by karpathy [tweet](https://x.com/karpathy/status/1930305209747812559).
            He makes the point that generated images are easy to discriminate by humans while going through a 300 line LLM generated code is time consuming.
            This tool aims to provide a visual quick smell test for generated code so that user can quickly identify issues instead of going through the code line by line.
            This is only a very rough and basic implementation of the idea.
            Making compound AI systems instead of text-to-text chatbots is the necessary direction.

            ### โœจ Features:
            - **UML Class Diagrams**: Automatically identifies classes, methods, attributes, and inheritance
            - **Call Graph Diagrams**: Visualizes function dependencies and call relationships
            - **Code Analysis**: Provides detailed structure analysis with complexity metrics
            - **MCP Integration**: Works with AI assistants via Model Context Protocol

            ### ๐Ÿ“š How to Use:
            1. **Paste Code**: Enter your Python code in the text area
            2. **Set Name**: Choose a name for your diagrams (optional)
            3. **Generate**: Click "Generate Diagrams" to create all visualizations and analysis
            4. **Download**: Save the generated diagram images
            5. **Review**: Read the comprehensive code analysis

            ### ๐Ÿ”ง Technical Details:
            - Built with **Gradio** for the web interface
            - Uses **py2puml** for Python-to-PlantUML conversion
            - **PlantUML** for UML diagram rendering
            - **pyan3** and **Graphviz** for call graph generation
            - **AST** (Abstract Syntax Tree) for code analysis

            ### ๐Ÿ’ก Tips:
            - Include type hints for better diagram quality
            - Use meaningful class and method names
            - Keep inheritance hierarchies clear
            - Add docstrings for better understanding
            - Works great with both class-based and function-based code

            ### ๐Ÿ› Troubleshooting:
            - **No UML diagram generated**: Check if your code contains class definitions
            - **No call graph generated**: Ensure your code has function definitions and calls
            - **Syntax errors**: Ensure your Python code is valid
            - **Import errors**: Stick to standard library imports for best results

            ## Model Context Protocol (MCP) Server

            This application automatically serves as an MCP server for AI assistants!

            ### ๐ŸŒ For Hugging Face Spaces (Public):
            ```json
            {
              "mcpServers": {
                "python-diagram-generator": {
                  "url": "https://your-username-space-name.hf.space/gradio_api/mcp/sse"
                }
              }
            }
            ```

            ### ๐Ÿ  For Local Development:
            ```json
            {
              "mcpServers": {
                "python-diagram-generator": {
                  "command": "npx",
                  "args": [
                    "mcp-remote",
                    "http://127.0.0.1:7860/gradio_api/mcp/sse",
                    "--transport",
                    "sse-only"
                  ]
                }
              }
            }
            ```

            ### ๐Ÿ”’ For Private Spaces:
            ```json
            {
              "mcpServers": {
                "python-diagram-generator": {
                  "url": "https://your-username-space-name.hf.space/gradio_api/mcp/sse",
                  "headers": {
                    "Authorization": "Bearer hf_your_token_here"
                  }
                }
              }
            }
            ```

            ### ๐Ÿ“‹ Setup Instructions:
            1. Install Node.js and mcp-remote: `npm install -g mcp-remote`
            2. Add the configuration above to your MCP client
            3. Restart your MCP client (e.g., Claude Desktop)
            4. Test with prompts like: "Generate a UML diagram for this Python code: [your code]"

            ---

            **Local MCP Endpoint**: `http://127.0.0.1:7860/gradio_api/mcp/sse`
            **MCP Schema**: View at `/gradio_api/mcp/schema`

            ### ๐Ÿš€ Future Features:
            - Logic flowcharts
            - Data flow diagrams
            - State machine diagrams
            - Multi-file analysis
            - Enhanced UML features
            """
        )

    # Event handlers
    def load_example(example_filename):
        return get_sample_code(example_filename)

    example_dropdown.change(
        fn=load_example,
        inputs=example_dropdown,
        outputs=code_input,
    )

    generate_diagrams_btn.click(
        fn=generate_all_diagrams,
        inputs=[code_input, filename_input],
        outputs=[uml_diagram_output, call_graph_output, analysis_output],
        show_progress=True,
    )

    code_input.change(
        fn=analyze_code_structure,
        inputs=code_input,
        outputs=analysis_output,
        show_progress=False,
    )


# Launch configuration
if __name__ == "__main__":
    # Setup persistent testing space at startup
    setup_testing_space()

    demo.launch(
        mcp_server=True,  # Enable MCP functionality
        show_api=True,  # Show API documentation
        show_error=True,  # Show errors in interface
        share=True,  # Share the app publicly
        # debug=True,  # Enable debug mode for development
    )