# Hugging Face Spaces demo (Space status: Sleeping)
import gradio as gr
import torchvision
from torchvision.transforms import transforms
import torch
import requests

# Demo for image classification.
#
# Module-level setup: load a pretrained ResNet-18, build the standard
# ImageNet preprocessing pipeline, and fetch the human-readable labels.

# NOTE(review): `pretrained=True` is deprecated in torchvision >= 0.13 in
# favor of `weights=torchvision.models.ResNet18_Weights.DEFAULT` — confirm
# the installed torchvision version before migrating.
model = torchvision.models.resnet18(pretrained=True)

# Standard ImageNet preprocessing: resize to the network's expected input
# size, convert to a tensor, and normalize with the ImageNet mean/std.
trans_seq = torchvision.transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Inference-only: disable dropout/batch-norm training behavior.
model.eval()

# Download human-readable labels for ImageNet.
# timeout added so a dead endpoint fails fast instead of hanging forever;
# raise_for_status surfaces an HTTP error instead of silently parsing an
# error page as labels.
response = requests.get("https://git.io/JJkYN", timeout=10)
response.raise_for_status()
labels = response.text.split("\n")
def predict(image):
    """Predict label confidences for the given image.

    Args:
        image (PIL.Image.Image | None): The input image as supplied by the
            Gradio ``Image`` component (``type="pil"``). ``None`` when the
            user clears the image widget.

    Returns:
        dict: Maps each ImageNet label name to its softmax confidence.
            Empty dict when no image is provided.
    """
    # The gradio `change` event also fires when the image is cleared,
    # passing None — guard so the transform pipeline isn't called on it.
    if image is None:
        return {}
    image = trans_seq(image)
    # Add the batch dimension the model expects: (C, H, W) -> (1, C, H, W).
    image = image.unsqueeze(0)
    with torch.no_grad():
        # Softmax over the class logits of the single batch element.
        prediction = torch.nn.functional.softmax(model(image)[0], dim=0)
    # Enumerate the prediction itself rather than hardcoding the class
    # count (1000 for ResNet-18); behavior is identical for this model.
    confidences = {labels[i]: float(score) for i, score in enumerate(prediction)}
    return confidences
# Sample ImageNet images (GitHub-hosted) used to seed the demo UI.
_EXAMPLE_BASE = "https://github.com/EliSchwartz/imagenet-sample-images/raw/master"
_EXAMPLE_FILES = (
    "n01484850_great_white_shark.JPEG",
    "n01443537_goldfish.JPEG",
    "n01632777_axolotl.JPEG",
    "n01534433_junco.JPEG",
    "n01753488_horned_viper.JPEG",
)
examples = [f"{_EXAMPLE_BASE}/{name}" for name in _EXAMPLE_FILES]
# Build the Gradio UI: an image input wired to a top-3 label read-out.
with gr.Blocks(theme="soft") as demo:
    image_input = gr.Image(label="Input Image", type="pil")
    label_output = gr.Label(num_top_classes=3)
    # Clicking an example populates the image input, which in turn
    # triggers the `change` handler below.
    gr.Examples(
        examples=examples,
        examples_per_page=10,
        inputs=[image_input],
        outputs=[label_output],
    )
    image_input.change(predict, inputs=[image_input], outputs=[label_output])
demo.launch()