Update README.md
Browse files
README.md
CHANGED
|
@@ -69,6 +69,7 @@ Usage
|
|
| 69 |
**Video classification models** are employed in step **2** of the data curation pipeline to classify a video storyboard as either surgical or non-surgical. The models' usage is as follows:
|
| 70 |
```python
|
| 71 |
import torch
|
|
|
|
| 72 |
from PIL import Image
|
| 73 |
from model_loader import build_model
|
| 74 |
|
|
@@ -87,7 +88,14 @@ Usage
|
|
| 87 |
img_path = 'path/to/your/image.jpg'
|
| 88 |
img = Image.open(img_path)
|
| 89 |
img = img.resize((224, 224))
|
| 90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
|
| 92 |
# Extract features from the image
|
| 93 |
outputs = net(img_tensor)
|
|
@@ -114,7 +122,14 @@ Usage
|
|
| 114 |
img_path = 'path/to/your/image.jpg'
|
| 115 |
img = Image.open(img_path)
|
| 116 |
img = img.resize((224, 224))
|
| 117 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
|
| 119 |
# Extract features from the image
|
| 120 |
outputs = net(img_tensor)
|
|
@@ -141,7 +156,14 @@ Usage
|
|
| 141 |
img_path = 'path/to/your/image.jpg'
|
| 142 |
img = Image.open(img_path)
|
| 143 |
img = img.resize((224, 224))
|
| 144 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
|
| 146 |
# Extract features from the image
|
| 147 |
outputs = net(img_tensor)
|
|
|
|
| 69 |
**Video classification models** are employed in step **2** of the data curation pipeline to classify a video storyboard as either surgical or non-surgical. The models' usage is as follows:
|
| 70 |
```python
|
| 71 |
import torch
|
| 72 |
+
import torchvision
|
| 73 |
from PIL import Image
|
| 74 |
from model_loader import build_model
|
| 75 |
|
|
|
|
| 88 |
img_path = 'path/to/your/image.jpg'
|
| 89 |
img = Image.open(img_path)
|
| 90 |
img = img.resize((224, 224))
|
| 91 |
+
transform = torchvision.transforms.Compose([
|
| 92 |
+
torchvision.transforms.ToTensor(),
|
| 93 |
+
torchvision.transforms.Normalize(
|
| 94 |
+
(0.4299694, 0.29676908, 0.27707579),
|
| 95 |
+
(0.24373249, 0.20208984, 0.19319402)
|
| 96 |
+
)
|
| 97 |
+
])
|
| 98 |
+
img_tensor = transform(img).unsqueeze(0).to('cuda')
|
| 99 |
|
| 100 |
# Extract features from the image
|
| 101 |
outputs = net(img_tensor)
|
|
|
|
| 122 |
img_path = 'path/to/your/image.jpg'
|
| 123 |
img = Image.open(img_path)
|
| 124 |
img = img.resize((224, 224))
|
| 125 |
+
transform = torchvision.transforms.Compose([
|
| 126 |
+
torchvision.transforms.ToTensor(),
|
| 127 |
+
torchvision.transforms.Normalize(
|
| 128 |
+
(0.4299694, 0.29676908, 0.27707579),
|
| 129 |
+
(0.24373249, 0.20208984, 0.19319402)
|
| 130 |
+
)
|
| 131 |
+
])
|
| 132 |
+
img_tensor = transform(img).unsqueeze(0).to('cuda')
|
| 133 |
|
| 134 |
# Extract features from the image
|
| 135 |
outputs = net(img_tensor)
|
|
|
|
| 156 |
img_path = 'path/to/your/image.jpg'
|
| 157 |
img = Image.open(img_path)
|
| 158 |
img = img.resize((224, 224))
|
| 159 |
+
transform = torchvision.transforms.Compose([
|
| 160 |
+
torchvision.transforms.ToTensor(),
|
| 161 |
+
torchvision.transforms.Normalize(
|
| 162 |
+
(0.4299694, 0.29676908, 0.27707579),
|
| 163 |
+
(0.24373249, 0.20208984, 0.19319402)
|
| 164 |
+
)
|
| 165 |
+
])
|
| 166 |
+
img_tensor = transform(img).unsqueeze(0).to('cuda')
|
| 167 |
|
| 168 |
# Extract features from the image
|
| 169 |
outputs = net(img_tensor)
|