Spaces:
Build error
Build error
LayBraid
committed on
Commit
·
565dc8e
1
Parent(s):
d8bab2c
add requirements.txt
Browse files
- app.py +10 -8
- requirements.txt +2 -2
app.py
CHANGED
|
@@ -1,17 +1,16 @@
|
|
| 1 |
-
import clip
|
| 2 |
import gradio as gr
|
| 3 |
import os
|
| 4 |
import torch
|
| 5 |
from torchvision import transforms
|
| 6 |
from PIL import Image
|
| 7 |
from torchvision.datasets import CIFAR100
|
|
|
|
| 8 |
|
| 9 |
-
model
|
| 10 |
-
|
| 11 |
|
| 12 |
cifar100 = CIFAR100(root=os.path.expanduser("~/.cache"), download=True, train=False)
|
| 13 |
|
| 14 |
-
|
| 15 |
IMG_SIZE = 32 if torch.cuda.is_available() else 32
|
| 16 |
COMPOSED_TRANSFORMERS = transforms.Compose([
|
| 17 |
transforms.Resize(IMG_SIZE),
|
|
@@ -35,10 +34,13 @@ def normalize_tensor(tensor: torch.tensor) -> torch.tensor:
|
|
| 35 |
|
| 36 |
|
| 37 |
def send_inputs(img):
|
| 38 |
-
img = np_array_to_tensor_image(img)
|
| 39 |
-
img = normalize_tensor(img)
|
| 40 |
-
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
| 42 |
|
| 43 |
|
| 44 |
if __name__ == "__main__":
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import os
|
| 3 |
import torch
|
| 4 |
from torchvision import transforms
|
| 5 |
from PIL import Image
|
| 6 |
from torchvision.datasets import CIFAR100
|
| 7 |
+
from transformers import CLIPProcessor, CLIPModel
|
| 8 |
|
| 9 |
+
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
| 10 |
+
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
| 11 |
|
| 12 |
cifar100 = CIFAR100(root=os.path.expanduser("~/.cache"), download=True, train=False)
|
| 13 |
|
|
|
|
| 14 |
IMG_SIZE = 32 if torch.cuda.is_available() else 32
|
| 15 |
COMPOSED_TRANSFORMERS = transforms.Compose([
|
| 16 |
transforms.Resize(IMG_SIZE),
|
|
|
|
| 34 |
|
| 35 |
|
| 36 |
def send_inputs(img):
|
| 37 |
+
##img = np_array_to_tensor_image(img)
|
| 38 |
+
##img = normalize_tensor(img)
|
| 39 |
+
inputs = processor(images=img, return_tensors="pt", padding=True)
|
| 40 |
+
outputs = model(**inputs)
|
| 41 |
+
logits_per_image = outputs.logits_per_image
|
| 42 |
+
probs = logits_per_image.softmax(dim=1)
|
| 43 |
+
return probs
|
| 44 |
|
| 45 |
|
| 46 |
if __name__ == "__main__":
|
requirements.txt
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
-
clip~=0.2.0
|
| 2 |
torch~=1.11.0
|
| 3 |
torchvision~=0.12.0
|
| 4 |
gradio~=3.0.2
|
| 5 |
-
Pillow~=9.0.1
|
|
|
|
|
|
|
|
|
| 1 |
torch~=1.11.0
|
| 2 |
torchvision~=0.12.0
|
| 3 |
gradio~=3.0.2
|
| 4 |
+
Pillow~=9.0.1
|
| 5 |
+
transformers~=4.19.4
|