AfnanSD committed
Commit · 5b03208
1 Parent(s): 9db0c45
add files
Browse files
- .gitattributes +1 -0
- app.py +75 -0
- examples/id_1689_label_5.png +0 -0
- examples/id_3231_label_20.png +0 -0
- examples/id_9633_label_1.png +0 -0
- examples/id_9_label_2.png +0 -0
- model.py +42 -0
- requirements.txt +3 -0
- vit_model.pth +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+vit_model.pth filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,75 @@
+### 1. Imports and class names setup ###
+import gradio as gr
+import os
+import torch
+
+from model import create_vit_model
+from timeit import default_timer as timer
+from typing import Tuple, Dict
+
+# Setup class names
+class_names = ['أ','ب','ت','ث','ج','ح','خ','د','ذ','ر','ز','س','ش','ص','ض','ط','ظ','ع','غ','ف','ق','ك','ل','م','ن','ه','و','ي']
+
+### 2. Model and transforms preparation ###
+
+# Create ViTB16 model
+vit, vit_transforms = create_vit_model(
+    len(class_names)
+)
+
+# Load saved weights
+vit.load_state_dict(
+    torch.load(
+        f="vit_model.pth",
+        map_location=torch.device("cpu"),  # load to CPU
+    )
+)
+
+### 3. Predict function ###
+
+def predict(img) -> Tuple[Dict, float]:
+    """Transforms img, performs a prediction on it and returns the prediction and the time taken."""
+    # Start the timer
+    start_time = timer()
+
+    # Transform the target image and add a batch dimension
+    img = vit_transforms(img).unsqueeze(0)
+
+    # Put the model into evaluation mode and turn on inference mode
+    vit.eval()
+    with torch.inference_mode():
+        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+        pred_probs = torch.softmax(vit(img), dim=1)
+
+    # Create a {label: probability} dictionary for each class (the format Gradio's Label output expects)
+    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+    # Calculate the prediction time
+    pred_time = round(timer() - start_time, 5)
+
+    # Return the prediction dictionary and prediction time
+    return pred_labels_and_probs, pred_time
+
+### 4. Gradio app ###
+
+# Create title and description strings
+title = "Arabic Handwritten Characters Recognition"
+description = "A ViTB16 feature extractor computer vision model to classify handwritten Arabic letters."
+
+# Create the examples list from the "examples/" directory
+example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+# Create the Gradio demo
+demo = gr.Interface(fn=predict,  # mapping function from input to output
+                    inputs=gr.Image(type="pil"),  # the input is a single image
+                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # predict returns two values,
+                             gr.Number(label="Prediction time (s)")],           # so we declare two outputs
+                    examples=example_list,
+                    title=title,
+                    description=description)
+
+# Launch the demo!
+demo.launch()
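For sanity-checking outside the Gradio UI, the same pipeline can be run directly on one of the committed example images. A minimal sketch, not part of the commit, assuming it is executed from the repo root with Pillow available (torchvision and Gradio both pull it in):

import torch
from PIL import Image
from model import create_vit_model

# Rebuild the model and transforms exactly as app.py does
vit, vit_transforms = create_vit_model(28)
vit.load_state_dict(torch.load("vit_model.pth", map_location="cpu"))
vit.eval()

# One of the example images committed above; the "label_2" suffix records its true class
img = vit_transforms(Image.open("examples/id_9_label_2.png")).unsqueeze(0)

with torch.inference_mode():
    probs = torch.softmax(vit(img), dim=1)

print(probs.argmax(dim=1).item(), probs.max().item())  # predicted class index and its probability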
examples/id_1689_label_5.png
ADDED
examples/id_3231_label_20.png
ADDED
examples/id_9633_label_1.png
ADDED
examples/id_9_label_2.png
ADDED
model.py
ADDED
@@ -0,0 +1,42 @@
+import torch
+import torchvision
+
+from torch import nn
+from torchvision import transforms as T
+
+def create_vit_model(num_classes: int = 28,
+                     seed: int = 42):
+    """Creates a ViTB16 feature extractor model and transforms.
+
+    Args:
+        num_classes (int, optional): number of classes in the classifier head.
+            Defaults to 28.
+        seed (int, optional): random seed value. Defaults to 42.
+
+    Returns:
+        model (torch.nn.Module): ViTB16 feature extractor model.
+        transforms (torchvision.transforms): ViTB16 image transforms.
+    """
+    # Create ViTB16 pretrained weights, transforms and model
+    weights = torchvision.models.ViT_B_16_Weights.DEFAULT
+
+    # Get transforms from weights
+    vit_transforms = weights.transforms()
+
+    # Extend vit_transforms with a grayscale-to-RGB conversion, since ViT is trained on 3-channel RGB
+    vit_transforms = T.Compose([
+        T.Grayscale(num_output_channels=3),  # convert 1-channel grayscale to 3-channel RGB
+        vit_transforms,                      # then apply the pretrained ViT transforms
+    ])
+
+    model = torchvision.models.vit_b_16(weights=weights)
+
+    # Freeze all layers in the base model
+    for param in model.parameters():
+        param.requires_grad = False
+
+    # Change the head, seeding first for reproducibility
+    torch.manual_seed(seed)
+    model.heads = nn.Sequential(
+        nn.Linear(in_features=768,
+                  out_features=num_classes,  # number of Arabic letters = our classes
+                  bias=True))
+    return model, vit_transforms
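The grayscale-to-RGB composition is the part of create_vit_model most worth a quick check, since handwritten-letter scans are single-channel while ViT-B/16 expects 3-channel 224x224 input. A minimal shape-check sketch (the 32x32 dummy size is an arbitrary stand-in; any single-channel PIL image works):

from PIL import Image
from model import create_vit_model

model, transforms = create_vit_model(num_classes=28)

# A blank single-channel image stands in for a handwritten-letter scan
dummy = Image.new("L", (32, 32))

x = transforms(dummy)
print(x.shape)  # torch.Size([3, 224, 224]): Grayscale(3) first, then the pretrained resize/crop/normalize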
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch
+torchvision
+gradio
vit_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cd7bf531b4719876c51543e83e7c782b3eeeaf1f2cadbf1110ecc7e57308eee
+size 343341370
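This last file is a Git LFS pointer, not the weights themselves: the repo records only the SHA-256 of the content and its size (about 343 MB), and the real file is fetched from LFS storage on clone. Once fetched, the download can be verified against the pointer's oid; a sketch, assuming vit_model.pth sits in the current directory:

import hashlib

# Hash the fetched weights file and compare with the oid recorded in the pointer above
sha = hashlib.sha256()
with open("vit_model.pth", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print(sha.hexdigest() == "8cd7bf531b4719876c51543e83e7c782b3eeeaf1f2cadbf1110ecc7e57308eee")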