Sandhya2002 committed on
Commit
898be60
·
verified ·
1 Parent(s): 2a9c752

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,35 +1,35 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,14 @@
1
  ---
 
 
 
 
 
 
 
 
2
  license: mit
 
3
  ---
 
 
 
1
  ---
2
+ title: Food Vision Mini
3
+ emoji: 💻
4
+ colorFrom: green
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 5.26.0
8
+ app_file: app.py
9
+ pinned: false
10
  license: mit
11
+ short_description: EfficientNetB2 food classifier (pizza, steak, sushi)
12
  ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import torch
4
+ from model import create_model
5
+ from timeit import default_timer as timer
6
+ class_names=["Pizza","Steak","Sushi"]
7
+ effnetb2,effnetb2_transform=create_model(num_class=3)
8
+ effnetb2.load_state_dict(torch.load(f="effnetb2_pizza_steak_sushi (1).pt",map_location=torch.device('cpu')))
9
+
10
+
11
+ def predict(img):
12
+ start_time=timer()
13
+ img=effnetb2_transform(img).unsqueeze(0)
14
+ effnetb2.eval()
15
+ with torch.inference_mode():
16
+ predict_logit=effnetb2(img)
17
+ predict_prob=torch.softmax(predict_logit,dim=1)
18
+ predict_label=torch.argmax(predict_prob,dim=1)
19
+ pred_label_prob={class_names[i]:float(predict_prob[0][i]) for i in range(len(class_names))}
20
+ end_time=timer()
21
+ pred_time=round(end_time-start_time,4)
22
+ return pred_label_prob,pred_time
23
+
24
+ title="Food Vision Mini 🍕🥩🍣"
25
+ description="An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
26
+ example_list=[['examples/'+example] for example in os.listdir("examples")]
27
+ demo=gr.Interface(fn=predict,inputs=gr.Image(type="pil"),outputs=[gr.Label(num_top_classes=3,label="Prediction"),gr.Number(label="Prediction time")],examples=example_list,title=title,description=description)
28
+ demo.launch()
effnetb2_pizza_steak_sushi (1).pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee17107d6aa14ae47b961ab7b51e0a0a9c9ba04b5e35bf2fa546cca1d15677b5
3
+ size 31284026
examples/289822.jpg ADDED
examples/2901001.jpg ADDED
examples/796922.jpg ADDED
model.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torchvision
from torchvision import transforms
from torch import nn


def create_model(num_class: int = 3, seed: int = 42):
    """Build an EfficientNetB2 feature extractor with a fresh classifier head.

    Args:
        num_class: Number of output classes for the new classifier head.
        seed: Random seed applied before the head is created, so its initial
            weights are reproducible.

    Returns:
        A ``(model, transform)`` tuple: the frozen-backbone EfficientNetB2
        and the preprocessing transforms matching its pretrained weights.
    """
    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    transform = weights.transforms()
    model = torchvision.models.efficientnet_b2(weights=weights)

    # Freeze the pretrained backbone; only the new head will be trainable.
    for param in model.parameters():
        param.requires_grad = False

    # Seed RNG before instantiating the head for deterministic initialisation.
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_class, bias=True),
    )
    return model, transform
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio==5.26.0
2
+ torch==2.6.0
3
+ torchinfo==1.8.0
4
+ torchmetrics==1.7.0
5
+ torchvision==0.21.0
6
+ tqdm==4.66.5
7
+ transformers==4.50.3