Update app.py
app.py CHANGED

@@ -8,18 +8,17 @@ model, preprocess = clip.load("ViT-B/32", device=device)
 
 def clip(image, text):
 
-
-
-    # text = clip.tokenize([text]).to(device)
+    image = preprocess(image).unsqueeze(0).to(device)
+    text = clip.tokenize([text]).to(device)
 
-
-
-
+    with torch.no_grad():
+        image_features = model.encode_image(image)
+        text_features = model.encode_text(text)
 
-
-
+    logits_per_image, logits_per_text = model(image, text)
+    probs = logits_per_image.softmax(dim=-1).cpu().numpy()
 
-
+    return probs[0]
 
 demo = gr.Interface(fn=clip, inputs=["text", "image"], outputs="text")
 demo.launch()
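A note on the committed code: `def clip(image, text)` rebinds the name `clip`, which the file also uses for the CLIP module (see `clip.load(...)` in the hunk header), so `clip.tokenize([text])` inside the function raises an AttributeError at call time. Gradio also passes inputs to `fn` positionally, so `inputs=["text", "image"]` delivers the text to the `image` parameter and the image to `text`; and the `image_features`/`text_features` computed with `encode_image`/`encode_text` are never used. Below is a minimal corrected sketch of the whole file under those observations; the imports, the `device` line, and the `classify` name are assumptions, not part of the commit.

import clip
import gradio as gr
import torch

# Assumed setup; only the clip.load(...) line is confirmed by the hunk header.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

def classify(image, text):  # renamed so it no longer shadows the clip module
    # Batch the preprocessed PIL image and tokenize the prompt.
    image = preprocess(image).unsqueeze(0).to(device)
    tokens = clip.tokenize([text]).to(device)

    with torch.no_grad():
        # CLIP's forward pass returns image-to-text and text-to-image similarity logits.
        logits_per_image, logits_per_text = model(image, tokens)
        probs = logits_per_image.softmax(dim=-1).cpu().numpy()

    return str(probs[0])  # stringify for the "text" output component

# Image first to match classify()'s signature; type="pil" so preprocess() receives a PIL image.
demo = gr.Interface(fn=classify, inputs=[gr.Image(type="pil"), "text"], outputs="text")
demo.launch()

One further caveat: with a single prompt, `softmax(dim=-1)` is taken over one logit and always yields 1.0, so the demo only produces an informative probability if several candidate labels are tokenized together.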