Spaces:
Sleeping
Sleeping
AutoDeploy
Fix: Python 3.8 compatibility (use Tuple from typing) + Gradio 4.48.1 security update
8f59aab
#!/usr/bin/env python
"""
Diagnostic script - check for all errors before running the app.

Runs a sequence of environment checks for the Medical Image Segmentation
app: Python version, required modules, model files, sample images,
individual imports, model loading, preprocessing pipeline, a dummy
inference pass, and app.py syntax. Exits with status 1 on the first
fatal failure so CI / deploy logs show exactly which step broke.
"""
import sys
import os

print("\n" + "="*70)
print("🔍 DIAGNOSTIC CHECK - Medical Image Segmentation App")
print("="*70)

# 1. Check Python version
print("\n1️⃣ Python Version:")
print(f" Version: {sys.version}")
if sys.version_info >= (3, 8):
    print(" ✅ OK (>= 3.8)")
else:
    print(" ❌ FAIL (need >= 3.8)")
    sys.exit(1)

# 2. Check required modules (import by name; 'PIL' is the importable
# package name for Pillow)
print("\n2️⃣ Checking Required Modules:")
required_modules = [
    'torch',
    'torchvision',
    'transformers',
    'gradio',
    'numpy',
    'PIL'
]
missing_modules = []
for module in required_modules:
    try:
        __import__(module)
        print(f" ✅ {module}")
    except ImportError as e:
        print(f" ❌ {module}: {e}")
        missing_modules.append(module)
if missing_modules:
    print(f"\n❌ Missing modules: {', '.join(missing_modules)}")
    print("Install with: pip install " + " ".join(missing_modules))
    sys.exit(1)

# 3. Check model files (warn-only here; a missing path surfaces as a
# hard failure in step 6 when loading is actually attempted)
print("\n3️⃣ Checking Model Files:")
model_path = os.path.join(os.getcwd(), "segformer_trained_weights")
if os.path.exists(model_path):
    print(f" ✅ Model path exists: {model_path}")
    files = os.listdir(model_path)
    print(f" Files in model dir: {files}")
    if "pytorch_model.bin" in files:
        print(" ✅ pytorch_model.bin found")
    else:
        print(" ⚠️ pytorch_model.bin NOT found")
    if "config.json" in files:
        print(" ✅ config.json found")
    else:
        print(" ⚠️ config.json NOT found")
else:
    print(f" ❌ Model path NOT found: {model_path}")

# 4. Check samples directory (non-fatal: app can run without demo images)
print("\n4️⃣ Checking Sample Images:")
samples_path = os.path.join(os.getcwd(), "samples")
if os.path.exists(samples_path):
    sample_files = os.listdir(samples_path)
    sample_count = len([f for f in sample_files if f.endswith('.png')])
    print(" ✅ Samples directory exists")
    print(f" Found {sample_count} PNG images")
else:
    print(" ⚠️ Samples directory NOT found")

# 5. Try importing app modules one by one, so the failing import is
# pinpointed (step 2 only checked top-level packages)
print("\n5️⃣ Testing App Imports:")
try:
    import torch
    print(" ✅ torch")
except ImportError as e:
    print(f" ❌ torch: {e}")
    sys.exit(1)
try:
    import torch.nn.functional as F
    print(" ✅ torch.nn.functional")
except ImportError as e:
    print(f" ❌ torch.nn.functional: {e}")
    sys.exit(1)
try:
    import torchvision.transforms as TF
    print(" ✅ torchvision.transforms")
except ImportError as e:
    print(f" ❌ torchvision.transforms: {e}")
    sys.exit(1)
try:
    from transformers import SegformerForSemanticSegmentation
    print(" ✅ transformers.SegformerForSemanticSegmentation")
except ImportError as e:
    print(f" ❌ transformers: {e}")
    sys.exit(1)
try:
    import gradio as gr
    print(" ✅ gradio")
except ImportError as e:
    print(f" ❌ gradio: {e}")
    sys.exit(1)
try:
    from PIL import Image
    print(" ✅ PIL.Image")
except ImportError as e:
    print(f" ❌ PIL: {e}")
    sys.exit(1)

# 6. Try loading the model from the local weights directory
print("\n6️⃣ Testing Model Loading:")
try:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f" Device: {device}")
    model = SegformerForSemanticSegmentation.from_pretrained(
        model_path,
        num_labels=4,
        ignore_mismatched_sizes=True
    )
    model.to(device)
    model.eval()
    print(" ✅ Model loaded successfully")
    print(f" Model parameters: {sum(p.numel() for p in model.parameters())/1e6:.1f}M")
except Exception as e:
    print(f" ❌ Model loading failed: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

# 7. Test preprocessing — must mirror the transform used in app.py
# (288x288 resize, ImageNet mean/std normalization)
print("\n7️⃣ Testing Preprocessing:")
try:
    preprocess = TF.Compose([
        TF.Resize(size=(288, 288)),
        TF.ToTensor(),
        TF.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), inplace=True),
    ])
    print(" ✅ Preprocessing pipeline created")
except Exception as e:
    print(f" ❌ Preprocessing failed: {e}")
    sys.exit(1)

# 8. Test with dummy input (random batch of one 3x288x288 image)
print("\n8️⃣ Testing Inference with Dummy Input:")
try:
    with torch.no_grad():
        dummy = torch.randn(1, 3, 288, 288).to(device)
        output = model(pixel_values=dummy)
        print(" ✅ Model forward pass successful")
        print(f" Output shape: {output.logits.shape}")
except Exception as e:
    print(f" ❌ Model inference failed: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

# 9. Check app.py syntax without executing it
print("\n9️⃣ Checking app.py Syntax:")
try:
    with open("app.py", "r", encoding="utf-8") as f:
        code = f.read()
    compile(code, "app.py", "exec")
    print(" ✅ app.py syntax OK")
except SyntaxError as e:
    print(f" ❌ Syntax error: {e}")
    sys.exit(1)
except OSError as e:
    # Bug fix: a missing or unreadable app.py previously raised an
    # uncaught FileNotFoundError here, crashing the diagnostic with a
    # raw traceback instead of a clean failure report.
    print(f" ❌ Cannot read app.py: {e}")
    sys.exit(1)

print("\n" + "="*70)
print("✅ ALL CHECKS PASSED - App should run successfully!")
print("="*70)
print("\n🚀 You can now run: python app.py\n")