MEYTI BECI BAGUNDA
committed on
Commit
·
9087ee6
1
Parent(s):
472fb0c
Update 4 files
Browse files- /src/models/clip_vit.py
- /src/models/resnet_50.py
- /src/models/mobilenet_v3.py
- /src/classification_model.py
- src/classification_model.py +6 -1
- src/models/clip_vit.py +29 -4
- src/models/mobilenet_v3.py +33 -2
- src/models/resnet_50.py +36 -0
src/classification_model.py
CHANGED
|
@@ -4,6 +4,9 @@ from PIL import Image
|
|
| 4 |
from .data.model_data import ModelData
|
| 5 |
from .models.mobilenet_v3 import MobilenetV3
|
| 6 |
from .models.clip_vit import ClipVit
|
|
|
|
|
|
|
|
|
|
| 7 |
from .data.classification_result import ClassificationResult
|
| 8 |
|
| 9 |
class ClassificationModel:
|
|
@@ -26,7 +29,9 @@ class ClassificationModel:
|
|
| 26 |
def load_model(self):
|
| 27 |
self.models = [
|
| 28 |
ModelData('clip-vit-base-patch32', model_class=ClipVit()),
|
| 29 |
-
ModelData('mobilenet_v3', model_class=MobilenetV3())
|
|
|
|
|
|
|
| 30 |
]
|
| 31 |
|
| 32 |
def classify(self, model_name, image) -> List[ClassificationResult]:
|
|
|
|
| 4 |
from .data.model_data import ModelData
|
| 5 |
from .models.mobilenet_v3 import MobilenetV3
|
| 6 |
from .models.clip_vit import ClipVit
|
| 7 |
+
from .models.google_vit import GoogleVit
|
| 8 |
+
from .models.resnet_50 import Resnet50
|
| 9 |
+
|
| 10 |
from .data.classification_result import ClassificationResult
|
| 11 |
|
| 12 |
class ClassificationModel:
|
|
|
|
| 29 |
def load_model(self):
    """Instantiate every available classifier backend and register it by name."""
    # (name shown to callers, backend class) pairs — one entry per model.
    backends = (
        ('clip-vit-base-patch32', ClipVit),
        ('mobilenet_v3', MobilenetV3),
        ('google-vit-base-patch16-224', GoogleVit),
        ('microsoft/resnet-50', Resnet50),
    )
    self.models = [
        ModelData(name, model_class=backend()) for name, backend in backends
    ]
|
| 36 |
|
| 37 |
def classify(self, model_name, image) -> List[ClassificationResult]:
|
src/models/clip_vit.py
CHANGED
|
@@ -1,12 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from typing import List
|
| 2 |
from src.interface import ModelInterface
|
| 3 |
from src.data.classification_result import ClassificationResult
|
| 4 |
|
| 5 |
class ClipVit(ModelInterface):
|
| 6 |
def __init__(self):
|
| 7 |
-
print('
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
def classify_image(self, image) -> List[ClassificationResult]:
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
import torch
|
| 3 |
+
from transformers import CLIPProcessor, CLIPModel
|
| 4 |
from typing import List
|
| 5 |
from src.interface import ModelInterface
|
| 6 |
from src.data.classification_result import ClassificationResult
|
| 7 |
|
| 8 |
class ClipVit(ModelInterface):
    """Zero-shot image classifier backed by OpenAI's CLIP ViT-B/32.

    Scores the input image against a fixed set of landscape-related text
    prompts and returns the candidate labels ranked by image-text similarity.
    """

    # Candidate text labels scored against the image. Single source of truth:
    # the original duplicated this list in two places inside classify_image.
    CLASS_NAMES = ["volcano", "mountain", "alp", "mount", "valley"]

    def __init__(self):
        print('Initializing CLIP VIT model...')
        # Load pre-trained CLIP model and processor (downloads on first use).
        self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    def classify_image(self, image) -> List[ClassificationResult]:
        """Return candidate labels ranked by CLIP similarity to the image.

        Args:
            image: a PIL image (anything CLIPProcessor accepts).

        Returns:
            Up to 5 ClassificationResult objects, highest confidence first.
        """
        # Preprocess the image and candidate texts using CLIPProcessor.
        inputs = self.processor(
            text=self.CLASS_NAMES, images=image, return_tensors="pt", padding=True
        )

        # Perform inference; no gradients needed for classification.
        with torch.no_grad():
            outputs = self.model(**inputs)
        logits_per_image = outputs.logits_per_image  # image-text similarity score

        # Convert logits to probabilities using softmax.
        probabilities = torch.nn.functional.softmax(logits_per_image, dim=1)[0]

        # Rank labels by descending probability and keep the top 5.
        # BUG FIX: the original zipped the *sorted* top probabilities against
        # CLASS_NAMES in their *original* order, mislabelling every result.
        # The labels must be reindexed by the same sort order (top_indices).
        top_indices = torch.argsort(probabilities, descending=True)[:5].tolist()

        return [
            ClassificationResult(
                class_name=self.CLASS_NAMES[i],
                confidence=float(probabilities[i]),
            )
            for i in top_indices
        ]
|
src/models/mobilenet_v3.py
CHANGED
|
@@ -1,13 +1,44 @@
|
|
| 1 |
from typing import List
|
| 2 |
-
import
|
|
|
|
| 3 |
from src.interface import ModelInterface
|
| 4 |
from src.data.classification_result import ClassificationResult
|
|
|
|
|
|
|
| 5 |
|
| 6 |
class MobilenetV3(ModelInterface):
|
| 7 |
|
| 8 |
def __init__(self):
|
| 9 |
print('init... mobilenet v3 model')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
def classify_image(self, image) -> List[ClassificationResult]:
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
return results
|
|
|
|
| 1 |
from typing import List
|
| 2 |
+
import torch
|
| 3 |
+
import timm
|
| 4 |
from src.interface import ModelInterface
|
| 5 |
from src.data.classification_result import ClassificationResult
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import urllib.request
|
| 8 |
|
| 9 |
class MobilenetV3(ModelInterface):
    """ImageNet classifier backed by timm's pretrained MobileNetV3-Large."""

    def __init__(self):
        print('init... mobilenet v3 model')
        # Pretrained backbone in eval mode (inference only).
        self.model = timm.create_model('mobilenetv3_large_100', pretrained=True).eval()

        # Load the ImageNet class labels. FIX: the original unconditionally
        # re-downloaded the file on every construction; reuse the local copy
        # and fetch it only when missing (EAFP).
        url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
        try:
            labels_file = open(filename, "r")
        except FileNotFoundError:
            urllib.request.urlretrieve(url, filename)
            labels_file = open(filename, "r")
        with labels_file:
            self.class_labels = [s.strip() for s in labels_file.readlines()]

    def classify_image(self, image) -> List[ClassificationResult]:
        """Classify a PIL image; return the top-5 ImageNet predictions.

        Returns:
            5 ClassificationResult objects, highest confidence first.
        """
        # Get model-specific transforms (normalization, resize).
        data_config = timm.data.resolve_model_data_config(self.model)
        transforms = timm.data.create_transform(**data_config, is_training=False)
        input_tensor = transforms(image).unsqueeze(0)

        # Perform inference without gradient tracking.
        with torch.no_grad():
            output = self.model(input_tensor)

        # Get the top 5 predictions (probabilities and class indices).
        probabilities, top5_class_indices = torch.topk(output.softmax(dim=1), k=5)

        # Create ClassificationResult objects with confidence information.
        return [
            ClassificationResult(
                class_name=self.class_labels[top5_class_indices[0][i].item()],
                confidence=probabilities[0][i].item(),
            )
            for i in range(5)
        ]
|
src/models/resnet_50.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
from src.interface import ModelInterface
|
| 3 |
+
from src.data.classification_result import ClassificationResult
|
| 4 |
+
from transformers import AutoImageProcessor, ResNetForImageClassification
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
class Resnet50(ModelInterface):
    """ImageNet classifier backed by Hugging Face's microsoft/resnet-50."""

    def __init__(self):
        # BUG FIX: the message said 'clip vit model' (copy-paste from ClipVit).
        print('init... resnet-50 model')
        self.processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
        self.model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

    def classify_image(self, image) -> List[ClassificationResult]:
        """Classify a PIL image; return the top-5 ImageNet predictions.

        Returns:
            5 ClassificationResult objects, highest confidence first.
        """
        # Preprocess the image.
        inputs = self.processor(images=image, return_tensors="pt")

        # Perform inference; no gradients needed for classification.
        with torch.no_grad():
            outputs = self.model(**inputs)

        # Convert logits to probabilities with softmax (the original bounced
        # through numpy and back to torch — one pass in torch is equivalent).
        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)

        # Get the top 5 predictions, highest confidence first.
        top_probs, top_indices = torch.topk(probabilities, k=5, dim=-1)

        # Create ClassificationResult objects with confidence information.
        return [
            ClassificationResult(
                class_name=self.model.config.id2label[top_indices[0][i].item()],
                confidence=float(top_probs[0][i]),
            )
            for i in range(5)
        ]
|