Spaces:
Runtime error
Runtime error
Anusha806
committed on
Commit
·
073de73
1
Parent(s):
ce1c241
commit14
Browse files
app.py
CHANGED
|
@@ -318,7 +318,17 @@ metadata = fashion.remove_columns("image").to_pandas()
|
|
| 318 |
bm25 = BM25Encoder()
|
| 319 |
bm25.fit(metadata["productDisplayName"])
|
| 320 |
model = SentenceTransformer('sentence-transformers/clip-ViT-B-32', device='cuda' if torch.cuda.is_available() else 'cpu')
|
|
|
|
|
|
|
|
|
|
|
|
|
| 321 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 322 |
# ------------------- Hybrid Scaling -------------------
|
| 323 |
def hybrid_scale(dense, sparse, alpha: float):
|
| 324 |
|
|
|
|
| 318 |
bm25 = BM25Encoder()
|
| 319 |
bm25.fit(metadata["productDisplayName"])
|
| 320 |
model = SentenceTransformer('sentence-transformers/clip-ViT-B-32', device='cuda' if torch.cuda.is_available() else 'cpu')
|
| 321 |
+
from sentence_transformers import SentenceTransformer
|
| 322 |
+
import torch
|
| 323 |
+
|
| 324 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 325 |
|
| 326 |
+
# load a CLIP model from huggingface
|
| 327 |
+
model = SentenceTransformer(
|
| 328 |
+
'sentence-transformers/clip-ViT-B-32',
|
| 329 |
+
device=device
|
| 330 |
+
)
|
| 331 |
+
model
|
| 332 |
# ------------------- Hybrid Scaling -------------------
|
| 333 |
def hybrid_scale(dense, sparse, alpha: float):
|
| 334 |
|