# FoodVision Big — Gradio demo app.
# NOTE: the following lines were page metadata pasted into the source and
# would be a SyntaxError as bare code; kept here as comments for provenance:
#   FoodVisionBig / app.py
#   wisedew85's picture
#   FoodVision Big initial
#   22d0a2f verified

### 1. Imports and class names ###
import gradio as gr
import os
import torch
from model import create_effnetb1_model
from timeit import default_timer as timer
from typing import Tuple, Dict

# Load the Food101 class names (one name per line) into a list of strings.
with open("class_names.txt", "r") as f:
    class_names = [food_name.strip() for food_name in f.readlines()]
### 2. Model and transforms ###
# Build an EfficientNetB1 feature extractor with a 101-class head
# (one output per Food101 class) plus its matching input transforms.
effnetb1, effnetb1_transforms = create_effnetb1_model(num_classes=101)

# Load saved weights; map_location="cpu" so the app also runs without a GPU.
effnetb1.load_state_dict(
    torch.load(
        f="09_pretrained_effentb1_feature_extractor_food_101_20_percent.pth",
        map_location=torch.device("cpu"),
    )
)
### 3. Prediction function ###
def predict(img) -> Tuple[Dict, float]:
    """Classify a single image with the EfficientNetB1 model.

    Args:
        img: A PIL image to classify.

    Returns:
        A tuple of (dict mapping every class name to its predicted
        probability, prediction time in seconds rounded to 4 decimals).
    """
    # Start the prediction timer.
    start = timer()

    # Apply the model's transforms and prepend a batch dimension (axis 0).
    batch = effnetb1_transforms(img).unsqueeze(0)

    # Forward pass in eval/inference mode: logits -> softmax probabilities.
    effnetb1.eval()
    with torch.inference_mode():
        probs = torch.softmax(effnetb1(batch), dim=1)

    # Pair each class name with its probability for the Gradio Label output.
    labels_to_probs = {
        name: float(probs[0][idx]) for idx, name in enumerate(class_names)
    }

    # Elapsed prediction time, rounded to 4 decimal places.
    elapsed = round(timer() - start, 4)

    return labels_to_probs, elapsed
### 4. Gradio app ###
title = "FoodVision Big"
description = "An EfficientNetB1 feature extractor for 101 classes of food "
article = "Created at 09. PyTorch Model Deployment."

# Build the example list from the image files shipped in ./examples.
# Paths are relative so they resolve inside the deployed demo app.
example_list = [["examples/" + example] for example in os.listdir("examples")]
# (Removed a stray bare `example_list` expression — a notebook display
# artifact with no effect in a script.)

# Gradio interface: PIL image in -> (top-3 class probabilities, timing) out.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=3, label="Predictions"),
        gr.Number(label="Prediction time (s)"),
    ],
    examples=example_list,
    title=title,
    description=description,
    article=article,
)

# Run the demo with debug mode disabled.
demo.launch(debug=False)