File size: 1,642 Bytes
38bac22
 
 
 
 
 
 
 
 
 
 
 
a65f8bb
38bac22
16a879c
38bac22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import pandas as pd
from sentence_transformers import SentenceTransformer
import torch
from app.utils import remove_numbers
from app.qdrant_client import client
from qdrant_client.http import models
from pympler import asizeof
print("Loading model and data...")

# Prefer GPU when available; SentenceTransformer accepts either device string.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# Load the fine-tuned embedding model; asizeof reports deep memory usage in bytes.
model = SentenceTransformer("app/my_finetuned_modelV2", device=device)
print("model size : ", asizeof.asizeof(model))
df = pd.read_csv("app/data/cleaned_fileV2.csv")

# Strip digits (verse numbering) so textual duplicates collapse, then deduplicate.
# NOTE(review): set ordering is arbitrary, so the point ids assigned during
# upload below are not stable across runs — confirm nothing depends on them.
df['answer'] = df['answer'].apply(remove_numbers)
ayat = list(set(df['answer']))
# Bug fix: the original printed asizeof(ayat) (a byte size) while the message
# promised a count of unique ayat. Report the actual count; keep the memory
# footprint as secondary information.
print(f"Total unique ayat loaded: {len(ayat)} (~{asizeof.asizeof(ayat)} bytes)")

print("✅ Model and embeddings ready.")

# --- Create the Qdrant collection and upload embeddings on first run ---
collections = [c.name for c in client.get_collections().collections]
if "ayahs_collection" not in collections:
    print("Creating Qdrant collection and uploading embeddings...")
    # Encode every unique ayah; plain Python lists of floats are what the
    # Qdrant client expects for vectors.
    embeddings = model.encode(ayat, convert_to_tensor=False).tolist()
    # We just verified the collection is absent, so create_collection is the
    # correct call here; recreate_collection (deprecated in qdrant-client
    # >= 1.8) would needlessly issue a delete first.
    client.create_collection(
        collection_name="ayahs_collection",
        vectors_config=models.VectorParams(
            size=len(embeddings[0]),  # vector dimension taken from the model output
            distance=models.Distance.COSINE
        ),
    )

    # One point per unique ayah; sequential ids keep the embedding→text
    # mapping trivial (but see the ordering caveat where `ayat` is built).
    points = [
        models.PointStruct(
            id=idx,
            vector=emb,
            payload={"text": ayah}
        )
        for idx, (emb, ayah) in enumerate(zip(embeddings, ayat))
    ]

    client.upsert(collection_name="ayahs_collection", points=points)
    print("✅ Embeddings uploaded to Qdrant.")
else:
    print("✅ Collection already exists, skipping upload.")
    # Load embeddings from Qdrant