MatanKriel committed on
Commit
0ffa00f
·
verified ·
1 Parent(s): 9930b5f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -2
app.py CHANGED
@@ -20,9 +20,31 @@ except Exception as e:
20
  print(f"❌ Model Error: {e}")
21
 
22
  # --- 2. LOAD DATA ---
 
23
  print("⏳ Loading Dataset...")
24
- # Load exact 5k subset used in training
25
- dataset = load_dataset("ethz/food101", split="train").shuffle(seed=42).select(range(5000))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  # --- 3. LOAD EMBEDDINGS ---
28
  print(f"⏳ Loading Embeddings from {DATA_FILE}...")
 
20
  print(f"❌ Model Error: {e}")
21
 
22
  # --- 2. LOAD DATA ---
23
+ # --- 2. LOAD DATA (SMART MATCHING) ---
24
  print("⏳ Loading Dataset...")
25
+
26
+ # 1. Load the Embeddings File FIRST
27
+ df = pd.read_parquet(DATA_FILE)
28
+ valid_indices = df.index.tolist() # Assuming you preserved the original indices in the dataframe index
29
+ # OR if you reset the index in the notebook, we just check the length:
30
+ num_embeddings = len(df)
31
+
32
+ print(f" 👉 Embeddings file has {num_embeddings} rows.")
33
+
34
+ # 2. Load the Dataset
35
+ dataset_full = load_dataset("ethz/food101", split="train").shuffle(seed=42).select(range(5000))
36
+
37
+ # 3. CRITICAL FIX: If lengths don't match, we assume the parquet is a subset.
38
+ # (This is a guess - if you didn't save the original indices, this might still be slightly off,
39
+ # but it prevents the 'IndexError' crash).
40
+ if len(dataset_full) > num_embeddings:
41
+ print(f"⚠️ DATA MISMATCH DETECTED: Dataset has {len(dataset_full)} but Parquet has {num_embeddings}.")
42
+ print(" ✂️ Truncating dataset to match Parquet length...")
43
+ dataset = dataset_full.select(range(num_embeddings))
44
+ else:
45
+ dataset = dataset_full
46
+
47
+ print(f"✅ Final Dataset Size: {len(dataset)}")
48
 
49
  # --- 3. LOAD EMBEDDINGS ---
50
  print(f"⏳ Loading Embeddings from {DATA_FILE}...")