Spaces:
Sleeping
Sleeping
ad
Browse files- README.md +3 -3
- app.py +15 -0
- requirements.txt +4 -0
README.md
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
colorTo: indigo
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 4.22.0
|
|
|
|
| 1 |
---
|
| 2 |
+
title: CLIP Distance
|
| 3 |
+
emoji: 😻
|
| 4 |
+
colorFrom: blue
|
| 5 |
colorTo: indigo
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 4.22.0
|
app.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from sentence_transformers import SentenceTransformer
from PIL import Image
from scipy.spatial import distance

# CLIP model exposed through sentence-transformers: encodes PIL images
# (and text) into a shared embedding space.
model = SentenceTransformer('clip-ViT-B-32')


def image_distance(image1, image2) -> float:
    """Return the cosine similarity between the CLIP embeddings of two images.

    NOTE(review): despite the name, this computes 1 - cosine_distance,
    i.e. cosine *similarity* (1.0 means the embeddings point the same way).
    The name is kept unchanged for interface stability.

    Args:
        image1: first image as a numpy array (gradio "image" component output).
        image2: second image as a numpy array (gradio "image" component output).

    Returns:
        Cosine similarity of the two image embeddings as a float.
    """
    # BUG FIX: model.encode(...) with a list input returns a 2-D array of
    # shape (1, dim), but scipy's distance.cosine only accepts 1-D vectors
    # (it raises "Input vector should be 1-D."). Take row 0 of each result.
    embedding1 = model.encode(sentences=[Image.fromarray(image1)], batch_size=128)[0]
    embedding2 = model.encode(sentences=[Image.fromarray(image2)], batch_size=128)[0]

    return 1 - distance.cosine(embedding1, embedding2)


demo = gr.Interface(fn=image_distance, inputs=["image", "image"], outputs="label")
demo.launch()
|
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio~=4.22.0
|
| 2 |
+
sentence-transformers
|
| 3 |
+
numpy~=1.25.2
|
| 4 |
+
scipy
|