""" Python Dependency Compatibility Board A tool to parse, analyze, and resolve Python package dependencies. """ import re import json import tempfile import subprocess from pathlib import Path from typing import List, Dict, Tuple, Optional, Set from difflib import get_close_matches import requests from packaging.requirements import Requirement from packaging.specifiers import SpecifierSet from packaging.version import Version # Import ML models (with graceful fallback) try: from ml_models import ConflictPredictor, PackageEmbeddings ML_AVAILABLE = True except ImportError: ML_AVAILABLE = False print("Warning: ML models not available. Some features will be disabled.") class DependencyParser: """Parse requirements.txt and library lists into structured dependencies.""" @staticmethod def parse_requirements_text(text: str) -> List[Dict]: """Parse requirements.txt content into structured format.""" dependencies = [] seen_packages = {} for line in text.strip().split('\n'): line = line.strip() if not line or line.startswith('#'): continue # Remove comments if '#' in line: line = line[:line.index('#')].strip() try: req = Requirement(line) package_name = req.name.lower() # Handle duplicate packages if package_name in seen_packages: # Merge or warn about duplicates existing = seen_packages[package_name] if existing['specifier'] != str(req.specifier): dependencies.append({ 'package': package_name, 'specifier': str(req.specifier) if req.specifier else '', 'extras': list(req.extras) if req.extras else [], 'marker': str(req.marker) if req.marker else '', 'original': line, 'conflict': f"Duplicate: {existing['original']} vs {line}" }) continue dep = { 'package': package_name, 'specifier': str(req.specifier) if req.specifier else '', 'extras': list(req.extras) if req.extras else [], 'marker': str(req.marker) if req.marker else '', 'original': line, 'conflict': None } dependencies.append(dep) seen_packages[package_name] = dep except Exception as e: # Handle malformed lines dependencies.append({ 'package': line.split('==')[0].split('>=')[0].split('<=')[0].split('[')[0].strip(), 'specifier': '', 'extras': [], 'marker': '', 'original': line, 'conflict': f"Parse error: {str(e)}" }) return dependencies @staticmethod def parse_library_list(text: str) -> List[Dict]: """Parse a simple list of library names.""" dependencies = [] for line in text.strip().split('\n'): line = line.strip() if not line or line.startswith('#'): continue # Extract package name (remove version specifiers if present) package_name = re.split(r'[<>=!]', line)[0].strip() package_name = re.split(r'\[', package_name)[0].strip() if package_name: dependencies.append({ 'package': package_name.lower(), 'specifier': '', 'extras': [], 'marker': '', 'original': package_name, 'conflict': None }) return dependencies class DependencyResolver: """Resolve dependencies and check compatibility.""" def __init__(self, python_version: str = "3.10", platform: str = "any", device: str = "cpu"): self.python_version = python_version self.platform = platform self.device = device def build_dependency_graph(self, dependencies: List[Dict], deep_mode: bool = False) -> Dict: """Build dependency graph (simplified - in production would query PyPI).""" graph = { 'nodes': {}, 'edges': [], 'conflicts': [] } for dep in dependencies: package = dep['package'] graph['nodes'][package] = { 'specifier': dep['specifier'], 'extras': dep['extras'], 'marker': dep['marker'], 'conflict': dep.get('conflict') } if dep.get('conflict'): graph['conflicts'].append({ 'package': package, 'reason': dep['conflict'] }) # In deep mode, would fetch transitive dependencies from PyPI # For now, we'll use a simplified approach return graph def check_compatibility(self, graph: Dict) -> Tuple[bool, List[str]]: """Check version compatibility across the graph.""" issues = [] # Check for duplicate package conflicts for conflict in graph['conflicts']: issues.append(f"Conflict in {conflict['package']}: {conflict['reason']}") # Check known compatibility issues nodes = graph['nodes'] # PyTorch Lightning + PyTorch compatibility if 'pytorch-lightning' in nodes and 'torch' in nodes: pl_spec = nodes['pytorch-lightning']['specifier'] torch_spec = nodes['torch']['specifier'] # Simplified check - in production would parse versions properly if '==2.' in pl_spec or '>=2.' in pl_spec: if '==1.' in torch_spec or ('<2.' in torch_spec and '==1.' in torch_spec): issues.append("pytorch-lightning>=2.0 requires torch>=2.0, but torch<2.0 is specified") # FastAPI + Pydantic compatibility if 'fastapi' in nodes and 'pydantic' in nodes: fastapi_spec = nodes['fastapi']['specifier'] pydantic_spec = nodes['pydantic']['specifier'] if '==0.78' in fastapi_spec or '==0.7' in fastapi_spec: if '==2.' in pydantic_spec or '>=2.' in pydantic_spec: issues.append("fastapi==0.78.x requires pydantic v1, but pydantic v2 is specified") # TensorFlow + Keras compatibility if 'tensorflow' in nodes and 'keras' in nodes: tf_spec = nodes['tensorflow']['specifier'] keras_spec = nodes['keras']['specifier'] if '==1.' in tf_spec: if '==3.' in keras_spec or '>=3.' in keras_spec: issues.append("keras>=3.0 requires TensorFlow 2.x, but TensorFlow 1.x is specified") return len(issues) == 0, issues def resolve_dependencies( self, dependencies: List[Dict], strategy: str = "latest_compatible" ) -> Tuple[str, List[str]]: """Resolve dependencies using specified strategy.""" # Remove duplicates and conflicts seen_packages = {} clean_dependencies = [] for dep in dependencies: if dep.get('conflict'): continue package = dep['package'] if package in seen_packages: # Keep the one with more specific version if available existing = seen_packages[package] if dep['specifier'] and not existing['specifier']: clean_dependencies.remove(existing) clean_dependencies.append(dep) seen_packages[package] = dep continue clean_dependencies.append(dep) seen_packages[package] = dep # Create a temporary requirements file with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: req_lines = [] for dep in clean_dependencies: req_lines.append(dep['original']) f.write('\n'.join(req_lines)) temp_req_file = f.name warnings = [] try: # Try using pip's resolver with --dry-run and --report (pip 22.2+) result = subprocess.run( ['pip', 'install', '--dry-run', '--report', '-', '-r', temp_req_file], capture_output=True, text=True, timeout=60 ) if result.returncode == 0 and result.stdout.strip(): # Parse the JSON report try: report = json.loads(result.stdout) resolved = [] for package in report.get('install', []): name = package.get('metadata', {}).get('name', '') version = package.get('metadata', {}).get('version', '') if name and version: resolved.append(f"{name}=={version}") if resolved: return '\n'.join(sorted(resolved)), warnings except json.JSONDecodeError: warnings.append("Could not parse pip resolution report. Using original requirements.") except Exception as e: warnings.append(f"Error parsing resolution: {str(e)}") # Fallback: try pip-compile if available try: result = subprocess.run( ['pip-compile', '--dry-run', '--output-file', '-', temp_req_file], capture_output=True, text=True, timeout=60 ) if result.returncode == 0: return result.stdout.strip(), warnings except FileNotFoundError: pass except Exception: pass # Final fallback: return cleaned original requirements resolved_lines = [] for dep in clean_dependencies: line = dep['original'] # Apply strategy-based modifications if strategy == "stable/pinned" and not dep['specifier']: # In a real implementation, would query PyPI for latest stable line = f"{dep['package']} # Version not specified" elif strategy == "keep_existing_pins": # Keep as-is pass resolved_lines.append(line) if not warnings: warnings.append("Using original requirements. For full resolution, ensure pip>=22.2 is installed.") return '\n'.join(resolved_lines), warnings except subprocess.TimeoutExpired: warnings.append("Resolution timed out. Showing original requirements.") return '\n'.join([d['original'] for d in clean_dependencies]), warnings except Exception as e: warnings.append(f"Resolution error: {str(e)}") return '\n'.join([d['original'] for d in clean_dependencies]), warnings finally: Path(temp_req_file).unlink(missing_ok=True) class CatalogValidator: """Validate package names against a simple ground-truth catalog.""" def __init__(self, catalog_path: Path = Path("data/package_name_catalog.json"), use_ml: bool = True): self.catalog_path = catalog_path self.valid_packages: Set[str] = set() self.invalid_packages: Set[str] = set() self.use_ml = use_ml and ML_AVAILABLE self.embeddings = None self._load_catalog() # Load embeddings if available if self.use_ml: try: self.embeddings = PackageEmbeddings() except Exception as e: print(f"Warning: Could not load embeddings: {e}") self.use_ml = False def _load_catalog(self) -> None: if not self.catalog_path.exists(): return try: data = json.loads(self.catalog_path.read_text()) self.valid_packages = {p.lower() for p in data.get("valid_packages", [])} self.invalid_packages = {p.lower() for p in data.get("invalid_packages", [])} except Exception as exc: # Keep going even if catalog is malformed print(f"Warning: could not read catalog {self.catalog_path}: {exc}") def suggest_correction(self, package_name: str, cutoff: float = 0.6) -> Optional[str]: """Suggest a corrected package name using fuzzy matching and embeddings.""" if not self.valid_packages: return None package_lower = package_name.lower() # If it's already valid, no correction needed if package_lower in self.valid_packages: return None # Try ML-based embedding similarity first (more accurate) if self.use_ml and self.embeddings: try: best_match = self.embeddings.get_best_match(package_name, threshold=0.7) if best_match and best_match in self.valid_packages: return best_match except Exception: pass # Fallback to fuzzy matching matches = get_close_matches( package_lower, list(self.valid_packages), n=1, cutoff=cutoff ) if matches: return matches[0] return None def check_and_correct_packages(self, dependencies: List[Dict], auto_correct: bool = True) -> Tuple[List[Dict], List[str]]: """Check packages and optionally correct spelling mistakes. Returns: Tuple of (corrected_dependencies, warnings) """ corrected_deps = [] warnings: List[str] = [] seen: Set[str] = set() max_warnings = 15 for dep in dependencies: package = dep["package"] package_lower = package.lower() if package_lower in seen: corrected_deps.append(dep) continue seen.add(package_lower) # Check if it's explicitly invalid if self.invalid_packages and package_lower in self.invalid_packages: warnings.append(f"Package '{package}' is flagged as invalid in the catalog.") if len(warnings) >= max_warnings: corrected_deps.append(dep) continue # Try to suggest a correction suggestion = self.suggest_correction(package) if suggestion: if auto_correct: corrected_dep = dep.copy() corrected_dep['package'] = suggestion corrected_dep['original'] = corrected_dep['original'].replace(package, suggestion, 1) corrected_deps.append(corrected_dep) warnings.append(f" → Auto-corrected to '{suggestion}'") else: warnings.append(f" → Did you mean '{suggestion}'?") else: corrected_deps.append(dep) continue # Check if it's not in valid catalog and suggest correction if self.valid_packages and package_lower not in self.valid_packages: suggestion = self.suggest_correction(package) if suggestion: if auto_correct: corrected_dep = dep.copy() corrected_dep['package'] = suggestion corrected_dep['original'] = corrected_dep['original'].replace(package, suggestion, 1) corrected_deps.append(corrected_dep) warnings.append(f"Package '{package}' not found. Auto-corrected to '{suggestion}'") else: warnings.append(f"Package '{package}' not found. Did you mean '{suggestion}'?") if len(warnings) >= max_warnings: break else: warnings.append( f"Package '{package}' is not in the curated valid catalog. Check for typos or private packages." ) corrected_deps.append(dep) if len(warnings) >= max_warnings: break else: # Package is valid, keep as-is corrected_deps.append(dep) if len(warnings) >= max_warnings: warnings.append("Additional potential catalog issues omitted for brevity.") return corrected_deps, warnings def check_packages(self, dependencies: List[Dict]) -> List[str]: """Return warnings for packages that look suspicious or explicitly invalid.""" _, warnings = self.check_and_correct_packages(dependencies, auto_correct=False) return warnings class ProjectRequirementsGenerator: """Generate requirements.txt from project description using LLM.""" def __init__(self, use_llm: bool = True): """ Initialize project requirements generator. Args: use_llm: If True, uses Hugging Face Inference API If False, uses rule-based suggestions """ self.use_llm = use_llm # Using a better model for code generation # Try to use a code generation model, fallback to GPT-2 self.api_url = "https://api-inference.huggingface.co/models/bigcode/starcoder" self.fallback_url = "https://api-inference.huggingface.co/models/gpt2" self.headers = {"Content-Type": "application/json"} def generate_requirements(self, project_description: str) -> Tuple[str, str]: """ Generate requirements.txt from project description. Args: project_description: User's description of their project Returns: Tuple of (requirements_text, explanations_text) """ if not project_description or not project_description.strip(): return "", "" # Always try rule-based first as it's more reliable requirements, explanations = self._rule_based_suggestions(project_description) # Try LLM to enhance the suggestions if enabled if self.use_llm: prompt = self._create_requirements_prompt(project_description) llm_response = self._call_llm_for_requirements(prompt) llm_requirements, llm_explanations = self._parse_llm_response(llm_response) # If LLM generated valid requirements, use them (or merge with rule-based) if llm_requirements and len(llm_requirements.strip()) > 10: # Merge: prefer LLM but keep rule-based if LLM is incomplete if len(llm_requirements) > len(requirements): requirements = llm_requirements explanations = llm_explanations if llm_explanations else explanations else: # Combine both combined = set(requirements.split('\n')) combined.update(llm_requirements.split('\n')) requirements = '\n'.join([r for r in combined if r.strip()]) return requirements, explanations def _create_requirements_prompt(self, description: str) -> str: """Create a prompt for generating requirements.txt.""" prompt = f"""You are a Python expert. Based on this project description, generate a requirements.txt file with appropriate Python packages. Project Description: {description} Generate a requirements.txt file with: 1. Essential packages needed for this project 2. Appropriate version pins where necessary 3. Format: one package per line with version (e.g., "pandas==2.0.3" or "fastapi>=0.100.0") For each package, provide a brief explanation of why it's needed. Format your response as: REQUIREMENTS: package1==version1 package2>=version2 ... EXPLANATIONS: - package1: Brief explanation of why it's needed - package2: Brief explanation of why it's needed ... Keep it practical and focused on the most important dependencies (5-15 packages typically). """ return prompt def _call_llm_for_requirements(self, prompt: str) -> str: """Call LLM API to generate requirements.""" try: # Try the code generation model first payload = { "inputs": prompt, "parameters": { "max_new_tokens": 500, "temperature": 0.3, "return_full_text": False } } response = requests.post( self.api_url, headers=self.headers, json=payload, timeout=15 ) if response.status_code == 200: result = response.json() if isinstance(result, list) and len(result) > 0: generated_text = result[0].get('generated_text', '') if generated_text: return generated_text.strip() # Fallback to GPT-2 response = requests.post( self.fallback_url, headers=self.headers, json=payload, timeout=15 ) if response.status_code == 200: result = response.json() if isinstance(result, list) and len(result) > 0: generated_text = result[0].get('generated_text', '') if generated_text: return generated_text.strip() return "" except Exception as e: print(f"LLM API error: {e}") return "" def _parse_llm_response(self, response: str) -> Tuple[str, str]: """Parse LLM response to extract requirements and explanations.""" if not response: return "", "" requirements = [] explanations = [] # Try to extract REQUIREMENTS section if "REQUIREMENTS:" in response: req_section = response.split("REQUIREMENTS:")[1] if "EXPLANATIONS:" in req_section: req_section = req_section.split("EXPLANATIONS:")[0] for line in req_section.strip().split('\n'): line = line.strip() if line and not line.startswith('#') and not line.startswith('-'): # Clean up the line line = line.split('#')[0].strip() # Remove comments if line and ('==' in line or '>=' in line or '<=' in line or '>' in line or '<' in line or not any(c in line for c in '=<>')): requirements.append(line) # Try to extract EXPLANATIONS section if "EXPLANATIONS:" in response: exp_section = response.split("EXPLANATIONS:")[1] for line in exp_section.strip().split('\n'): line = line.strip() if line and line.startswith('-'): explanations.append(line[1:].strip()) # If parsing failed, try to extract package names from the response if not requirements: # Look for lines that look like package specifications for line in response.split('\n'): line = line.strip() # Check if it looks like a package (has letters, maybe numbers, maybe version) if line and ('==' in line or '>=' in line or '<=' in line): parts = line.split() if parts: requirements.append(parts[0]) requirements_text = '\n'.join(requirements[:20]) # Limit to 20 packages explanations_text = '\n'.join(explanations[:20]) if explanations else "" return requirements_text, explanations_text def _rule_based_suggestions(self, description: str) -> Tuple[str, str]: """Generate rule-based suggestions when LLM is unavailable.""" desc_lower = description.lower() suggestions = [] explanations = [] # RAG / Chatbot / PDF processing if any(word in desc_lower for word in ['rag', 'chatbot', 'pdf', 'document', 'query', 'retrieval']): suggestions.append("streamlit>=1.28.0") suggestions.append("langchain>=0.1.0") suggestions.append("pypdf>=3.17.0") if 'openai' in desc_lower or 'gpt' in desc_lower: suggestions.append("openai>=1.0.0") else: suggestions.append("openai>=1.0.0") suggestions.append("chromadb>=0.4.0") explanations.append("- streamlit: Build interactive web apps for your chatbot interface") explanations.append("- langchain: Framework for building RAG applications") explanations.append("- pypdf: PDF parsing and text extraction") explanations.append("- openai: OpenAI API for LLM integration") explanations.append("- chromadb: Vector database for document embeddings") # Web frameworks if any(word in desc_lower for word in ['web', 'api', 'server', 'backend', 'rest']): suggestions.append("fastapi>=0.100.0") suggestions.append("uvicorn[standard]>=0.23.0") explanations.append("- fastapi: Modern web framework for building APIs") explanations.append("- uvicorn: ASGI server to run FastAPI applications") # Data science if any(word in desc_lower for word in ['data', 'analysis', 'csv', 'excel', 'dataframe', 'pandas']): suggestions.append("pandas>=2.0.0") suggestions.append("numpy>=1.24.0") explanations.append("- pandas: Data manipulation and analysis") explanations.append("- numpy: Numerical computing library") # Machine learning if any(word in desc_lower for word in ['ml', 'machine learning', 'model', 'train', 'neural', 'deep learning', 'ai']): suggestions.append("scikit-learn>=1.3.0") if 'pytorch' in desc_lower or 'torch' in desc_lower: suggestions.append("torch>=2.0.0") explanations.append("- torch: PyTorch deep learning framework") elif 'tensorflow' in desc_lower or 'tf' in desc_lower: suggestions.append("tensorflow>=2.13.0") explanations.append("- tensorflow: TensorFlow deep learning framework") explanations.append("- scikit-learn: Machine learning algorithms and utilities") # Database if any(word in desc_lower for word in ['database', 'sql', 'db', 'postgres', 'mysql']): suggestions.append("sqlalchemy>=2.0.0") explanations.append("- sqlalchemy: SQL toolkit and ORM") # HTTP requests if any(word in desc_lower for word in ['http', 'request', 'fetch', 'download']): suggestions.append("requests>=2.31.0") explanations.append("- requests: HTTP library for making API calls") # Environment variables if any(word in desc_lower for word in ['config', 'env', 'environment', 'settings']): suggestions.append("python-dotenv>=1.0.0") explanations.append("- python-dotenv: Load environment variables from .env file") # If no specific matches, provide common packages if not suggestions: suggestions.append("requests>=2.31.0") suggestions.append("python-dotenv>=1.0.0") explanations.append("- requests: HTTP library for API calls and web requests") explanations.append("- python-dotenv: Manage environment variables and configuration") requirements_text = '\n'.join(suggestions) if suggestions else "" explanations_text = '\n'.join(explanations) if explanations else "" return requirements_text, explanations_text class ExplanationEngine: """Generate intelligent explanations for dependency conflicts using LLM.""" def __init__(self, use_llm: bool = True): """ Initialize explanation engine. Args: use_llm: If True, uses Hugging Face Inference API (free tier) If False, uses rule-based explanations only """ self.use_llm = use_llm # Using Hugging Face Inference API (free tier) self.api_url = "https://api-inference.huggingface.co/models/gpt2" self.headers = {"Content-Type": "application/json"} def generate_explanation(self, conflict: Dict, dependencies: List[Dict]) -> Dict: """ Generate a detailed explanation for a conflict. Args: conflict: Conflict dictionary with type, packages, message, etc. dependencies: Full list of dependencies for context Returns: Dictionary with explanation, why_it_happens, how_to_fix """ # Build context about the conflict conflict_type = conflict.get('type', 'unknown') packages = conflict.get('packages', [conflict.get('package', 'unknown')]) message = conflict.get('message', '') details = conflict.get('details', {}) # Create prompt for LLM prompt = self._create_prompt(conflict, dependencies) # Get LLM explanation explanation_text = self._call_llm(prompt) if self.use_llm else self._fallback_explanation(prompt) # Parse and structure the explanation return { 'summary': message, 'explanation': explanation_text, 'why_it_happens': self._extract_why(explanation_text, conflict), 'how_to_fix': self._extract_fix(explanation_text, conflict), 'packages_involved': packages, 'severity': conflict.get('severity', 'medium') } def _create_prompt(self, conflict: Dict, dependencies: List[Dict]) -> str: """Create a prompt for the LLM.""" conflict_type = conflict.get('type', 'unknown') packages = conflict.get('packages', [conflict.get('package', 'unknown')]) message = conflict.get('message', '') details = conflict.get('details', {}) # Get relevant dependency info relevant_deps = [d for d in dependencies if d['package'] in packages] prompt = f"""You are a Python dependency expert. Explain this dependency conflict clearly: Conflict: {message} Type: {conflict_type} Packages involved: {', '.join(packages)} Dependency details: """ for dep in relevant_deps: prompt += f"- {dep['package']}: {dep['specifier'] or 'no version specified'}\n" if details: prompt += f"\nVersion constraints: {json.dumps(details)}\n" prompt += """ Provide a clear, concise explanation that: 1. Explains what the conflict is in simple terms 2. Explains why this conflict happens (technical reason) 3. Suggests how to fix it (specific version recommendations) Keep it under 150 words and use plain language. """ return prompt def _call_llm(self, prompt: str) -> str: """ Call LLM API to generate explanation. Falls back to rule-based explanation if API fails. """ try: # Try Hugging Face Inference API (free tier) payload = { "inputs": prompt, "parameters": { "max_new_tokens": 200, "temperature": 0.7, "return_full_text": False } } response = requests.post( self.api_url, headers=self.headers, json=payload, timeout=10 ) if response.status_code == 200: result = response.json() if isinstance(result, list) and len(result) > 0: generated_text = result[0].get('generated_text', '') if generated_text: return generated_text.strip() # If API fails, fall back to rule-based return self._fallback_explanation(prompt) except Exception as e: # Fall back to rule-based explanation return self._fallback_explanation(prompt) def _fallback_explanation(self, prompt: str) -> str: """Generate rule-based explanation when LLM is unavailable.""" # Extract key info from prompt if "pytorch-lightning" in prompt.lower() and "torch" in prompt.lower(): return """PyTorch Lightning 2.0+ requires PyTorch 2.0 or higher because it uses new PyTorch APIs and features that don't exist in version 1.x. The conflict happens because you're trying to use a newer version of PyTorch Lightning with an older version of PyTorch. To fix this, either upgrade PyTorch to 2.0+ or downgrade PyTorch Lightning to 1.x.""" elif "fastapi" in prompt.lower() and "pydantic" in prompt.lower(): return """FastAPI 0.78.x was built for Pydantic v1, which has a different API than Pydantic v2. The conflict occurs because Pydantic v2 introduced breaking changes that FastAPI 0.78 doesn't support. To fix this, either upgrade FastAPI to 0.99+ (which supports Pydantic v2) or downgrade Pydantic to v1.x.""" elif "tensorflow" in prompt.lower() and "keras" in prompt.lower(): return """Keras 3.0+ requires TensorFlow 2.x because it was redesigned to work with TensorFlow 2's eager execution and new features. TensorFlow 1.x uses a different execution model that Keras 3.0 doesn't support. To fix this, upgrade TensorFlow to 2.x or downgrade Keras to 2.x.""" elif "duplicate" in prompt.lower(): return """You have the same package specified multiple times with different versions. This creates ambiguity about which version should be installed. To fix this, remove duplicate entries and keep only one version specification per package.""" else: return """This dependency conflict occurs due to incompatible version requirements between packages. Review the version constraints and ensure all packages are compatible with each other. Consider updating to compatible versions or using a dependency resolver.""" def _extract_why(self, explanation: str, conflict: Dict) -> str: """Extract the 'why it happens' part from explanation.""" # Simple extraction - look for sentences explaining the reason sentences = explanation.split('.') why_sentences = [s.strip() for s in sentences if any(word in s.lower() for word in ['because', 'due to', 'requires', 'needs', 'since'])] return '. '.join(why_sentences[:2]) + '.' if why_sentences else "Version constraints are incompatible." def _extract_fix(self, explanation: str, conflict: Dict) -> str: """Extract the 'how to fix' part from explanation.""" # Simple extraction - look for fix suggestions sentences = explanation.split('.') fix_sentences = [s.strip() for s in sentences if any(word in s.lower() for word in ['upgrade', 'downgrade', 'fix', 'change', 'update', 'remove'])] return '. '.join(fix_sentences[:2]) + '.' if fix_sentences else "Adjust version constraints to compatible versions." def process_dependencies( project_description: str, library_list: str, requirements_text: str, uploaded_file, python_version: str, device: str, os_type: str, mode: str, resolution_strategy: str, use_llm_explanations: bool = True, use_ml_prediction: bool = True, use_ml_spellcheck: bool = True, show_ml_details: bool = False ) -> Tuple[str, str, str]: """Main processing function for Gradio interface.""" # Generate requirements from project description if provided generated_requirements = "" generation_explanations = "" if project_description and project_description.strip(): generator = ProjectRequirementsGenerator(use_llm=True) generated_requirements, generation_explanations = generator.generate_requirements(project_description) # If we generated requirements, add them to the requirements_text if generated_requirements: if requirements_text: requirements_text = generated_requirements + "\n" + requirements_text else: requirements_text = generated_requirements # Collect dependencies from all sources all_dependencies = [] # Parse library list if library_list: parser = DependencyParser() deps = parser.parse_library_list(library_list) all_dependencies.extend(deps) # Parse requirements text if requirements_text: parser = DependencyParser() deps = parser.parse_requirements_text(requirements_text) all_dependencies.extend(deps) # Parse uploaded file if uploaded_file: try: # Handle both string paths and file objects (Gradio 6.x compatibility) if isinstance(uploaded_file, str): file_path = uploaded_file else: # If it's a file object, get the path file_path = uploaded_file.name if hasattr(uploaded_file, 'name') else str(uploaded_file) with open(file_path, 'r') as f: content = f.read() parser = DependencyParser() deps = parser.parse_requirements_text(content) all_dependencies.extend(deps) except Exception as e: return f"Error reading file: {str(e)}", "", "" if not all_dependencies: return "Please provide at least one input: library list, requirements text, or uploaded file.", "", "" catalog_validator = CatalogValidator(use_ml=use_ml_spellcheck and ML_AVAILABLE) # Auto-correct spelling mistakes in package names all_dependencies, catalog_warnings = catalog_validator.check_and_correct_packages(all_dependencies, auto_correct=True) # ML-based conflict prediction (pre-analysis) ml_conflict_prediction = None ml_confidence = 0.0 ml_details = "" if use_ml_prediction and ML_AVAILABLE: try: predictor = ConflictPredictor() requirements_text_for_ml = '\n'.join([d['original'] for d in all_dependencies]) has_conflict, confidence = predictor.predict(requirements_text_for_ml) ml_conflict_prediction = has_conflict ml_confidence = confidence # Build ML details output ml_details = f""" ### ML Model Details **Conflict Prediction Model:** - Prediction: {"Conflict Detected" if has_conflict else "No Conflict"} - Confidence: {confidence:.2%} - Model Type: Random Forest Classifier - Features Analyzed: Package presence, version specificity, conflict patterns """ if show_ml_details: # Get feature importance or additional details ml_details += f""" **Raw Prediction:** - Has Conflict: {has_conflict} - Confidence Score: {confidence:.4f} - Probability Distribution: Conflict={confidence:.2%}, No Conflict={1-confidence:.2%} """ if has_conflict and confidence > 0.7: catalog_warnings.append( f"ML Prediction: High probability ({confidence:.1%}) of conflicts detected" ) except Exception as e: print(f"ML prediction error: {e}") ml_details = f"ML Prediction Error: {str(e)}" elif use_ml_prediction and not ML_AVAILABLE: ml_details = "ML models not available. Train models using `train_conflict_model.py` to enable this feature." # Build dependency graph resolver = DependencyResolver(python_version=python_version, platform=os_type, device=device) deep_mode = (mode == "Deep (with transitive dependencies)") graph = resolver.build_dependency_graph(all_dependencies, deep_mode=deep_mode) # Check compatibility is_compatible, issues = resolver.check_compatibility(graph) # Convert string issues to structured format for LLM explanations structured_issues = [] for issue in issues: if isinstance(issue, str): # Parse the issue string to extract package names and type issue_dict = { 'type': 'version_incompatibility', 'message': issue, 'severity': 'high', 'details': {} } # Extract package names from known patterns packages = [] issue_lower = issue.lower() # Check for specific known conflicts if 'pytorch-lightning' in issue_lower and 'torch' in issue_lower: packages = ['pytorch-lightning', 'torch'] issue_dict['type'] = 'version_incompatibility' # Extract version details for dep in all_dependencies: if dep['package'] in packages: issue_dict['details'][dep['package']] = dep.get('specifier', '') elif 'fastapi' in issue_lower and 'pydantic' in issue_lower: packages = ['fastapi', 'pydantic'] issue_dict['type'] = 'version_incompatibility' for dep in all_dependencies: if dep['package'] in packages: issue_dict['details'][dep['package']] = dep.get('specifier', '') elif 'tensorflow' in issue_lower and 'keras' in issue_lower: packages = ['tensorflow', 'keras'] issue_dict['type'] = 'version_incompatibility' for dep in all_dependencies: if dep['package'] in packages: issue_dict['details'][dep['package']] = dep.get('specifier', '') elif 'conflict in' in issue_lower: # Duplicate package conflict pkg = issue.split('Conflict in')[1].split(':')[0].strip() packages = [pkg] issue_dict['type'] = 'duplicate' issue_dict['package'] = pkg else: # Generic: try to find packages mentioned in the issue for dep in all_dependencies: if dep['package'] in issue_lower: packages.append(dep['package']) if packages: issue_dict['packages'] = packages else: issue_dict['package'] = 'unknown' issue_dict['packages'] = [] structured_issues.append(issue_dict) else: structured_issues.append(issue) # Generate LLM explanations if enabled explanations = [] if use_llm_explanations and structured_issues: explanation_engine = ExplanationEngine(use_llm=use_llm_explanations) for issue in structured_issues: try: explanation = explanation_engine.generate_explanation(issue, all_dependencies) explanations.append(explanation) except Exception as e: # If explanation generation fails, just use the issue message explanations.append({ 'summary': issue.get('message', str(issue)), 'explanation': issue.get('message', str(issue)), 'why_it_happens': 'Unable to generate explanation.', 'how_to_fix': 'Review version constraints.', 'packages_involved': issue.get('packages', []), 'severity': issue.get('severity', 'medium') }) # Resolve dependencies resolved_text, resolver_warnings = resolver.resolve_dependencies(all_dependencies, resolution_strategy) warnings = catalog_warnings + resolver_warnings # Build output message output_parts = [] output_parts.append("## Dependency Analysis Results\n\n") # Show generated requirements if project description was provided if project_description and project_description.strip() and generated_requirements: output_parts.append("### Generated Requirements from Project Description\n\n") output_parts.append(f"**Project:** {project_description[:100]}{'...' if len(project_description) > 100 else ''}\n\n") output_parts.append("**Suggested Packages:**\n") output_parts.append("```\n") output_parts.append(generated_requirements) output_parts.append("\n```\n\n") if generation_explanations: output_parts.append("**Why these packages?**\n") output_parts.append(generation_explanations) output_parts.append("\n\n") output_parts.append("---\n\n") # Show ML prediction if available if ML_AVAILABLE and ml_conflict_prediction is not None: if ml_conflict_prediction: output_parts.append(f"### ML Prediction: Potential Conflicts Detected (Confidence: {ml_confidence:.1%})\n\n") else: output_parts.append(f"### ML Prediction: Low Conflict Risk (Confidence: {ml_confidence:.1%})\n\n") if issues: output_parts.append("### Compatibility Issues Found:\n") if explanations: # Show detailed LLM explanations for i, (issue, explanation) in enumerate(zip(issues, explanations), 1): output_parts.append(f"#### Issue #{i}: {explanation['summary']}\n\n") output_parts.append(f"**Explanation:**\n{explanation['explanation']}\n\n") output_parts.append(f"**Why this happens:**\n{explanation['why_it_happens']}\n\n") output_parts.append(f"**How to fix:**\n{explanation['how_to_fix']}\n\n") output_parts.append("---\n\n") else: # Fallback to simple list for issue in issues: output_parts.append(f"- {issue}\n") output_parts.append("\n") # Separate corrections from other warnings corrections = [w for w in warnings if "Auto-corrected" in w or "→" in w] other_warnings = [w for w in warnings if w not in corrections] if corrections: output_parts.append("### Spelling Corrections:\n") for correction in corrections: output_parts.append(f"- {correction}\n") output_parts.append("\n") if other_warnings: output_parts.append("### Warnings:\n") for warning in other_warnings: output_parts.append(f"- {warning}\n") output_parts.append("\n") if is_compatible and not issues: output_parts.append("### No compatibility issues detected!\n\n") output_parts.append(f"### Resolved Requirements ({len(all_dependencies)} packages):\n") output_parts.append("```\n") output_parts.append(resolved_text) output_parts.append("\n```\n") # Add ML details if requested if show_ml_details and ml_details: output_parts.append(ml_details) return ''.join(output_parts), resolved_text, ml_details # Gradio Interface def create_interface(): """Create and return the Gradio interface.""" import gradio as gr with gr.Blocks(title="Python Dependency Compatibility Board") as app: gr.Markdown(""" # Python Dependency Compatibility Board Analyze and resolve Python package dependencies with **AI-powered explanations** and **ML-based conflict prediction**. ## Key Features | Feature | Status | Description | |---------|--------|-------------| | **LLM Requirements Generation** | Active | Generate requirements.txt from project description using AI | | **LLM Reasoning** | Active | AI-powered natural language explanations for conflicts | | **ML Conflict Prediction** | {"Available" if ML_AVAILABLE else "Not Loaded"} | Machine learning model predicts conflicts before analysis | | **Embedding-Based Spell Check** | {"Available" if ML_AVAILABLE else "Not Loaded"} | Semantic similarity matching for package names | | **Auto-Correction** | Active | Automatically fixes spelling mistakes in package names | | **Dependency Resolution** | Active | Resolves conflicts using pip's resolver | """) # Project Description Input (Optional) with gr.Row(): with gr.Column(scale=3): project_description_input = gr.Textbox( label="Project Description (Optional) - AI-Powered Requirements Generation", placeholder="Describe your project idea here...\nExample: 'I want to build a web API for data analysis with machine learning capabilities'", lines=4, info="Describe your project and AI will suggest required libraries with explanations.", value="" ) with gr.Column(scale=1): generate_requirements_btn = gr.Button( "Generate Requirements from Description", variant="secondary", size="lg" ) generated_requirements_display = gr.Markdown( label="Generated Requirements Preview", value="AI-generated requirements preview will appear here after clicking the button above." ) gr.Markdown("---") with gr.Row(): with gr.Column(scale=1): gr.Markdown("### Input Methods") library_input = gr.Textbox( label="Library Names (one per line)", placeholder="pandas\ntorch\nlangchain\nfastapi", lines=5, info="Enter package names, one per line" ) requirements_input = gr.Textbox( label="Requirements.txt Content", placeholder="pandas==2.0.3\ntorch>=2.0.0\nlangchain==0.1.0", lines=10, info="Paste your requirements.txt content here" ) file_upload = gr.File( label="Upload requirements.txt", file_types=[".txt"] ) with gr.Column(scale=1): gr.Markdown("### Environment Settings") python_version = gr.Dropdown( choices=["3.8", "3.9", "3.10", "3.11", "3.12"], value="3.10", label="Python Version", info="Target Python version" ) device = gr.Dropdown( choices=["CPU only", "NVIDIA GPU (CUDA)", "Apple Silicon (MPS)", "Custom / other"], value="CPU only", label="Device", info="Target device/platform" ) os_type = gr.Dropdown( choices=["Any / generic", "Linux (x86_64)", "Windows (x86_64)", "MacOS (Intel)", "MacOS (Apple Silicon)"], value="Any / generic", label="Operating System", info="Target operating system" ) mode = gr.Radio( choices=["Quick (top-level only)", "Deep (with transitive dependencies)"], value="Quick (top-level only)", label="Analysis Mode", info="Quick mode is faster, Deep mode includes all dependencies" ) resolution_strategy = gr.Dropdown( choices=["latest_compatible", "stable/pinned", "keep_existing_pins", "minimal_changes"], value="latest_compatible", label="Resolution Strategy", info="How to resolve version conflicts" ) gr.Markdown("---") gr.Markdown("### AI & ML Features") use_llm = gr.Checkbox( label="**LLM Reasoning** - AI Explanations", value=True, info="Generate intelligent, natural language explanations for conflicts using LLM" ) use_ml_prediction = gr.Checkbox( label="**ML Conflict Prediction**", value=True, info=f"{'Model available - Predicts conflicts before detailed analysis' if ML_AVAILABLE else 'Model not loaded - Train models to enable'}" ) use_ml_spellcheck = gr.Checkbox( label="**ML Spell Check** (Embedding-based)", value=True, info=f"{'Model available - Uses semantic similarity for better corrections' if ML_AVAILABLE else 'Model not loaded - Train models to enable'}" ) show_ml_details = gr.Checkbox( label="Show ML Model Details", value=False, info="Display raw ML predictions and confidence scores" ) process_btn = gr.Button("Analyze & Resolve Dependencies", variant="primary", size="lg") with gr.Row(): output_display = gr.Markdown( label="Analysis Results", value="Results will appear here after processing..." ) with gr.Row(): with gr.Column(scale=2): resolved_output = gr.Textbox( label="Resolved requirements.txt", lines=15, info="Copy this content to use as your requirements.txt file" ) download_btn = gr.File( label="Download requirements.txt", value=None, visible=True ) with gr.Column(scale=1): ml_output = gr.Markdown( label="ML Model Output", value="ML predictions will appear here when enabled...", visible=True ) def generate_requirements_only(project_desc): """Generate requirements from project description only.""" if not project_desc or not project_desc.strip(): return "", "" generator = ProjectRequirementsGenerator(use_llm=True) requirements, explanations = generator.generate_requirements(project_desc) if requirements: output = f"## Generated Requirements\n\n" output += f"**Project:** {project_desc[:100]}{'...' if len(project_desc) > 100 else ''}\n\n" output += "**Suggested Packages:**\n```\n" output += requirements output += "\n```\n\n" if explanations: output += "**Why these packages?**\n" output += explanations # Also return the requirements text for the textbox return output, requirements else: error_msg = "Could not generate requirements. Please try a more detailed description or check your connection." return error_msg, "" def process_and_download(*args): # Extract all arguments result_text, resolved_text, ml_details = process_dependencies(*args) # Create a temporary file for download temp_file = None if resolved_text and resolved_text.strip(): try: with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f: f.write(resolved_text) temp_file = f.name except Exception as e: print(f"Error creating download file: {e}") # Format ML output ml_output_text = ml_details if ml_details else "ML features disabled or models not available." return result_text, resolved_text, temp_file if temp_file else None, ml_output_text # Button to generate requirements from description def generate_and_update(project_desc, existing_reqs): """Generate requirements and update the requirements input.""" if not project_desc or not project_desc.strip(): return "Please enter a project description first.", existing_reqs generator = ProjectRequirementsGenerator(use_llm=True) requirements, explanations = generator.generate_requirements(project_desc) # Check if we got valid requirements (rule-based should always return something) if requirements and requirements.strip() and len(requirements.strip()) > 5: # Create preview output preview = f"## Generated Requirements\n\n" preview += f"**Project:** {project_desc[:100]}{'...' if len(project_desc) > 100 else ''}\n\n" preview += "**Suggested Packages:**\n```\n" preview += requirements preview += "\n```\n\n" if explanations and explanations.strip(): preview += "**Why these packages?**\n" preview += explanations preview += "\n\n*Requirements have been added to the 'Requirements.txt Content' box below. You can edit them before analysis.*" # Update requirements input (append or replace) if existing_reqs and existing_reqs.strip(): updated_reqs = requirements + "\n" + existing_reqs else: updated_reqs = requirements return preview, updated_reqs else: # Fallback - generate basic requirements desc_lower = project_desc.lower() basic_reqs = [] basic_explanations = [] if 'streamlit' in desc_lower or 'web' in desc_lower or 'app' in desc_lower: basic_reqs.append("streamlit>=1.28.0") basic_explanations.append("- streamlit: Build interactive web applications") if 'pdf' in desc_lower or 'document' in desc_lower: basic_reqs.append("pypdf>=3.17.0") basic_explanations.append("- pypdf: PDF parsing and text extraction") if 'rag' in desc_lower or 'chatbot' in desc_lower or 'llm' in desc_lower: basic_reqs.append("langchain>=0.1.0") basic_reqs.append("openai>=1.0.0") basic_explanations.append("- langchain: Framework for building LLM applications") basic_explanations.append("- openai: OpenAI API integration") if basic_reqs: reqs_text = '\n'.join(basic_reqs) exp_text = '\n'.join(basic_explanations) preview = f"## Generated Requirements\n\n**Project:** {project_desc[:100]}\n\n**Suggested Packages:**\n```\n{reqs_text}\n```\n\n**Why these packages?**\n{exp_text}" if existing_reqs and existing_reqs.strip(): updated_reqs = reqs_text + "\n" + existing_reqs else: updated_reqs = reqs_text return preview, updated_reqs error_msg = "## Could not generate requirements\n\nPlease try a more detailed description with keywords like: web, API, data analysis, machine learning, PDF, chatbot, etc." return error_msg, existing_reqs generate_requirements_btn.click( fn=generate_and_update, inputs=[project_description_input, requirements_input], outputs=[generated_requirements_display, requirements_input] ) process_btn.click( fn=process_and_download, inputs=[project_description_input, library_input, requirements_input, file_upload, python_version, device, os_type, mode, resolution_strategy, use_llm, use_ml_prediction, use_ml_spellcheck, show_ml_details], outputs=[output_display, resolved_output, download_btn, ml_output] ) gr.Markdown(""" --- ### How to Use 1. **(Optional) Describe your project** in the "Project Description" box - AI will suggest required libraries 2. **Input your dependencies** using any of the three methods (or combine them) 3. **Configure your environment** (Python version, device, OS) 4. **Enable AI/ML features** (LLM explanations, ML predictions, ML spell-check) 5. **Choose analysis mode**: Quick for fast results, Deep for complete dependency tree 6. **Select resolution strategy**: How to handle version conflicts 7. **Click "Analyze & Resolve Dependencies"** 8. **Review the results** including AI-generated requirements and explanations 9. **Download the resolved requirements.txt** ### Features - **AI Requirements Generation**: Describe your project and get suggested libraries with explanations - Parse multiple input formats - Detect version conflicts - Check compatibility across dependency graph - Resolve dependencies using pip - Generate clean, pip-compatible requirements.txt - Environment-aware (Python version, platform, device) """) return app if __name__ == "__main__": app = create_interface() # For Hugging Face Spaces, use default launch settings # For local development, you can customize app.launch()