# Scraped from a Hugging Face Spaces page (Space status was "Runtime error");
# the remainder of this file is the Space's app script.
# Standard library
import gc
import os

# Third-party
import clip
import gradio as gr
import numpy as np
import pandas as pd
import requests
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from PIL import Image
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
#from IPython.display import display

# Local (BLIP repository checked out alongside this script)
from BLIP.models.blip import blip_decoder
# Run on the first GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# BLIP evaluates captions on 384x384 inputs; the checkpoint below is the
# base-ViT captioning model.
# NOTE(review): the '*' is part of the published BLIP checkpoint filename,
# not a wildcard — confirm the download URL resolves.
blip_image_eval_size = 384
blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'

# Download the pretrained decoder, switch it to inference mode, and place it
# on the chosen device. (.eval() and .to() both return the module, so the
# calls chain.)
blip_model = blip_decoder(pretrained=blip_model_url, image_size=blip_image_eval_size, vit='base').eval().to(device)
def greet(name):
    """Return a short greeting for *name*."""
    return f"hi {name}!!"
# Expose greet() as a minimal text-in / text-out Gradio app and start serving.
iface = gr.Interface(greet, "text", "text")
iface.launch()