| import torch |
| import torchvision |
| import TractionModel as plup |
| import gradio as gr |
|
|
|
|
def init_model(path):
    """Build the TractionModel network, load trained weights from *path*,
    and put it in evaluation mode ready for inference.

    Returns the loaded model (still on CPU; moved to a device later).
    """
    net = plup.create_model()
    net = plup.load_weights(net, path)
    net.eval()
    return net
|
|
|
|
def inference(image):
    """Classify a PIL image as pull-up vs. no pull-up.

    Uses the module-level ``vanilla_transform``, ``device`` and ``model``
    globals defined below.  Returns a Gradio-style label dict mapping each
    class name to its probability.
    """
    batch = vanilla_transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        logits = model(batch)
    # NOTE(review): assumes the model output is indexable and that element 1
    # holds the pull-up logit — confirm against TractionModel.
    score = float(torch.sigmoid(logits[1].to("cpu")).numpy()[0])
    return {'pull-up': score, 'no pull-up': 1 - score}
|
|
|
|
# ImageNet channel statistics — inputs must be normalized the same way the
# network was trained.
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]

# Preprocessing pipeline: resize shorter side to 224 px, convert the PIL
# image to a CHW float tensor in [0, 1], then channel-normalize.
_tf = torchvision.transforms
vanilla_transform = _tf.Compose([
    _tf.Resize(224),
    _tf.ToTensor(),
    _tf.Normalize(norm_mean, norm_std),
])
|
|
# Load the trained classifier once at start-up, then pin it to the best
# available device (GPU when present, otherwise CPU).
model = init_model("model-score0.96-f1_10.9-f1_20.99.pt")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
|
|
|
|
# Sample images shown beneath the interface; paths are relative to the app.
examples = [['tibo.png'], ['tibo2.png'], ['real_pull_up.png'], ['no_pull_up.png'], ['doge.jpg']]

# NOTE(review): gr.inputs / gr.outputs / enable_queue / test_launch belong to
# the legacy Gradio 2.x/3.x API and were removed in Gradio 4 — pin the gradio
# version in requirements, or migrate these calls before upgrading.
iface = gr.Interface(
    fn=inference,
    inputs=gr.inputs.Image(source="upload", type='pil'),
    outputs=gr.outputs.Label(),
    examples=examples,
    live=True,
    enable_queue=True,
)

iface.test_launch()
if __name__ == "__main__":
    iface.launch()
|
|