Shadow0482 committed on
Commit
904babc
·
verified ·
1 Parent(s): 704fecc

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +62 -14
README.md CHANGED
@@ -28,36 +28,84 @@ tags:
28
  - RAM usage: ~150-220 MB
29
  - Speed: ~0.8–1.5 seconds per image on CPU
30
 
31
- ### Quick test code
32
 
33
  ```python
34
- pip install -q onnxruntime torchvision matplotlib
35
- ```
 
 
36
 
37
- ```python
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  import onnxruntime as ort
39
  import numpy as np
40
  from PIL import Image
41
  import torchvision.transforms as transforms
 
 
42
 
43
- session = ort.InferenceSession("iris-vit.onnx",
44
- providers=["CPUExecutionProvider"])
45
 
 
46
  transform = transforms.Compose([
47
  transforms.Resize((224, 224)),
48
  transforms.ToTensor(),
49
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
 
50
  ])
51
 
52
- img = Image.open("Your_image").convert("RGB")
53
- input_tensor = transform(img).unsqueeze(0).numpy().astype(np.float32)
54
 
55
- output = session.run(None, {"input": input_tensor})[0][0]
56
- probs = np.exp(output) / np.sum(np.exp(output))
57
- pred_idx = np.argmax(probs)
 
 
58
 
59
- classes = ["No DR", "Mild DR", "Moderate DR", "Severe DR", "Proliferative DR"]
60
- print(f"✅ Predicted: {classes[pred_idx]} ({probs[pred_idx]*100:.1f}%)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  ```
62
 
63
  **License**: MIT
 
28
  - RAM usage: ~150-220 MB
29
  - Speed: ~0.8–1.5 seconds per image on CPU
30
 
31
+ ### Quick test code for Colab
32
 
33
  ```python
34
+ # ============================
35
+ # 1. Install dependencies
36
+ # ============================
37
+ !pip install -q onnxruntime huggingface_hub pillow torchvision matplotlib
38
 
39
+ # ============================
40
+ # 2. Download the ONNX model
41
+ # ============================
42
+ from huggingface_hub import hf_hub_download
43
+
44
+ print("📥 Downloading iris-vit.onnx ...")
45
+ model_path = hf_hub_download(
46
+ repo_id="Shadow0482/iris-onnx",
47
+ filename="iris-vit.onnx"
48
+ )
49
+ print(f"✅ Model downloaded: {model_path}")
50
+
51
+ # ============================
52
+ # 3. Load model & define inference
53
+ # ============================
54
  import onnxruntime as ort
55
  import numpy as np
56
  from PIL import Image
57
  import torchvision.transforms as transforms
58
+ import matplotlib.pyplot as plt
59
+ from google.colab import files
60
 
61
+ # Load ONNX session (CPU is fine & fast for this ~105 MB model)
62
+ session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
63
 
64
+ # Preprocessing (exactly what the model expects)
65
  transform = transforms.Compose([
66
  transforms.Resize((224, 224)),
67
  transforms.ToTensor(),
68
+ transforms.Normalize(mean=[0.485, 0.456, 0.406],
69
+ std=[0.229, 0.224, 0.225])
70
  ])
71
 
72
+ print("✅ Model loaded successfully!")
 
73
 
74
+ # ============================
75
+ # 4. Upload a fundus image & run inference
76
+ # ============================
77
+ print("\n📤 Please upload a color fundus/retina image (JPG/PNG)...")
78
+ uploaded = files.upload()
79
 
80
+ if uploaded:
81
+ img_path = list(uploaded.keys())[0]
82
+ img = Image.open(img_path).convert("RGB")
83
+
84
+ # Preprocess
85
+ input_tensor = transform(img).unsqueeze(0).numpy().astype(np.float32)
86
+
87
+ # Inference
88
+ outputs = session.run(None, {"input": input_tensor})[0][0]
89
+
90
+ # Softmax
91
+ exp_scores = np.exp(outputs)
92
+ probs = exp_scores / np.sum(exp_scores)
93
+ pred_idx = np.argmax(probs)
94
+
95
+ classes = ["No DR", "Mild DR", "Moderate DR", "Severe DR", "Proliferative DR"]
96
+
97
+ print(f"\n🎯 **Prediction:** {classes[pred_idx]}")
98
+ print(f" Confidence: {probs[pred_idx]*100:.1f}%")
99
+ print("\n📊 Full probabilities:")
100
+ for name, p in zip(classes, probs):
101
+ print(f" {name:20} → {p*100:5.1f}%")
102
+
103
+ # Show image
104
+ plt.figure(figsize=(8, 6))
105
+ plt.imshow(img)
106
+ plt.title(f"Predicted: {classes[pred_idx]} ({probs[pred_idx]*100:.1f}%)", fontsize=14)
107
+ plt.axis("off")
108
+ plt.show()
109
  ```
110
 
111
  **License**: MIT