forouzanfallah commited on
Commit
b1af3e6
·
verified ·
1 Parent(s): a4aad7d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +971 -0
app.py ADDED
@@ -0,0 +1,971 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import time
4
+ import random
5
+ import hashlib
6
+ import threading
7
+ from dataclasses import dataclass
8
+ from typing import List, Dict, Any
9
+
10
+ from PIL import Image, ImageDraw, ImageFont
11
+ import numpy as np
12
+ import gradio as gr
13
+ from filelock import FileLock
14
+ # ----------------------
15
+ # Configuration
16
+ # ----------------------
17
+
18
+ SR_DIR = os.environ.get("SR_DIR", "./data/sr")
19
+ GT_DIR = os.environ.get("GT_DIR", "./data/gt")
20
+ RESULTS_DIR = os.environ.get("RESULTS_DIR", "./results")
21
+ PROGRESS_PATH = os.path.join(RESULTS_DIR, "progress.json")
22
+ ALL_RESULTS_JSONL = os.path.join(RESULTS_DIR, "all_results.jsonl")
23
+ SAVE_PII = True # set False to only store hashed user_id
24
+
25
+
26
+ # NEW — global coordination files
27
+ ASSIGN_PATH = os.path.join(RESULTS_DIR, "assignments.json") # in-flight reservations
28
+ ROUND_PATH = os.path.join(RESULTS_DIR, "round.json") # current round state
29
+
30
+ # NEW — cross-process locks for those files
31
+ ASSIGN_LOCK = FileLock(ASSIGN_PATH + ".lock")
32
+ ROUND_LOCK = FileLock(ROUND_PATH + ".lock")
33
+
34
+ # NEW — reservation expires if user disappears before submitting (seconds)
35
+ RESERVATION_TTL = 600
36
+
37
+
38
+
39
+ # Tile grid config: 20 tiles -> 4 rows x 5 columns
40
+ TILE_ROWS = 4
41
+ TILE_COLS = 5
42
+ N_TILES = TILE_ROWS * TILE_COLS
43
+
44
+ # Which image is used for the RTS detection / tile selection part:
45
+ # "sr" -> use SR image; "gt" -> use GT image
46
+ TEST_IMAGE_SOURCE = "sr"
47
+
48
+ # Thread lock for writing files safely in multi-user scenarios
49
+ WRITE_LOCK = threading.Lock()
50
+
51
+ # ----------------------
52
+ # Data model
53
+ # ----------------------
54
@dataclass
class Sample:
    """One evaluation item: a matched SR/GT image pair sharing a basename."""
    sample_id: str  # basename without extension, shared by both files
    sr_path: str    # path to the super-resolved (SR) image
    gt_path: str    # path to the ground-truth (GT) image
59
+
60
+
61
+ # === GLOBAL ROUND / ASSIGNMENT HELPERS ===
62
+
63
+ def _load_json(path: str) -> Dict[str, Any]:
64
+ if not os.path.exists(path):
65
+ return {}
66
+ try:
67
+ with open(path, "r", encoding="utf-8") as f:
68
+ return json.load(f)
69
+ except Exception:
70
+ return {}
71
+
72
+ def _save_json(path: str, data: Dict[str, Any]):
73
+ os.makedirs(os.path.dirname(path), exist_ok=True)
74
+ with open(path, "w", encoding="utf-8") as f:
75
+ json.dump(data, f, ensure_ascii=False, indent=2)
76
+
77
def get_round_and_left(total: int) -> tuple[int, int]:
    """Return (current_round_number, images_left_in_this_round)."""
    # ROUND_LOCK is a cross-process FileLock, so this snapshot is consistent
    # even with multiple server workers writing round.json.
    with ROUND_LOCK:
        st = _load_json(ROUND_PATH) or {}
        r = int(st.get("round", 1))        # rounds are 1-based
        done = set(st.get("done", []))     # sample_ids finished this round
        left = max(0, total - len(done))   # clamp in case of stale state
        return r, left
85
+
86
def mark_done_and_maybe_rollover(sample_id: str, total: int) -> tuple[int, int, bool]:
    """
    Mark sample as done for the current global round.
    Returns (round_number_after_op, images_left_after_op, rolled_over_bool).
    If we finished the round, it automatically starts the next round.
    """
    with ROUND_LOCK:
        st = _load_json(ROUND_PATH) or {}
        st.setdefault("round", 1)
        done = set(st.get("done", []))
        rolled = False  # NOTE(review): unused — rollover is reported via the return value

        # Idempotent: re-submitting an already-done sample does not grow "done".
        if sample_id not in done:
            done.add(sample_id)

        if len(done) >= total:
            # complete round, roll to next: bump the counter and clear "done"
            st["round"] = int(st["round"]) + 1
            st["done"] = []
            _save_json(ROUND_PATH, st)
            # A fresh round has all `total` images remaining.
            return int(st["round"]), total, True
        else:
            # Persist sorted for stable, diff-friendly JSON output.
            st["done"] = sorted(done)
            _save_json(ROUND_PATH, st)
            left = total - len(done)
            return int(st["round"]), left, False
112
+
113
def _cleanup_and_load_assignments() -> Dict[str, Any]:
    """Drop expired reservations and return the map.

    Map shape: {sample_id: {"uid": str, "ts": float}} — see reserve_next_sample.
    """
    now = time.time()
    with ASSIGN_LOCK:
        a = _load_json(ASSIGN_PATH) or {}
        # Keep only reservations younger than RESERVATION_TTL seconds; a
        # missing "ts" counts as infinitely old and is dropped.
        fresh = {k: v for k, v in a.items() if now - v.get("ts", 0) < RESERVATION_TTL}
        # Rewrite the file only when something actually expired.
        if len(fresh) != len(a):
            _save_json(ASSIGN_PATH, fresh)
        return fresh
122
+
123
def release_reservation(sample_id: str):
    """Remove the in-flight reservation for `sample_id`, if any."""
    with ASSIGN_LOCK:
        a = _load_json(ASSIGN_PATH) or {}
        # pop() returns None when absent; only rewrite on an actual removal.
        if a.pop(sample_id, None) is not None:
            _save_json(ASSIGN_PATH, a)
128
+
129
def reserve_next_sample(uid: str, samples: List[Sample]) -> int:
    """
    Reserve and return an index into `samples` that:
      - is NOT already done in the current round
      - is NOT reserved by someone else (unless reservation expired)
    If the round is already complete, it rolls over and picks from the new round.
    """
    total = len(samples)
    with ASSIGN_LOCK, ROUND_LOCK:
        # Clean old reservations first.
        # NOTE(review): this helper re-acquires ASSIGN_LOCK while we already
        # hold it — safe only if the FileLock implementation is reentrant
        # within a thread; confirm if the locking library changes.
        assignments = _cleanup_and_load_assignments()

        # Load round state
        st = _load_json(ROUND_PATH) or {}
        st.setdefault("round", 1)
        st.setdefault("done", [])
        done = set(st["done"])

        # If current round is exhausted, roll now so users can continue seamlessly
        if len(done) >= total:
            st["round"] = int(st["round"]) + 1
            st["done"] = []
            done = set()
            _save_json(ROUND_PATH, st)

        assigned = set(assignments.keys())

        # Prefer: not-done + not-assigned
        candidates = [i for i, s in enumerate(samples)
                      if s.sample_id not in done and s.sample_id not in assigned]

        # If everything not-done is currently reserved, fall back to any not-assigned (rare)
        if not candidates:
            candidates = [i for i, s in enumerate(samples) if s.sample_id not in assigned]
        if not candidates:
            candidates = list(range(total))  # last resort: allow double-assignment

        # Random pick spreads concurrent users across the remaining pool.
        idx = random.choice(candidates)
        assignments[samples[idx].sample_id] = {"uid": uid, "ts": time.time()}
        _save_json(ASSIGN_PATH, assignments)
        return idx
170
+ # === END GLOBAL ROUND / ASSIGNMENT HELPERS ===
171
+
172
+
173
def ensure_paths():
    """Create the results directory and verify both input directories exist.

    Raises FileNotFoundError when either SR_DIR or GT_DIR is missing.
    """
    os.makedirs(RESULTS_DIR, exist_ok=True)
    inputs_ok = os.path.isdir(SR_DIR) and os.path.isdir(GT_DIR)
    if not inputs_ok:
        raise FileNotFoundError(
            f"Expected directories '{SR_DIR}' and '{GT_DIR}'.\n"
            "Create them and add matching SR/GT image pairs."
        )
180
+
181
+
182
def list_image_files(folder: str) -> List[str]:
    """Return sorted full paths of supported images directly inside `folder`."""
    supported = {".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff"}
    joined = (os.path.join(folder, entry) for entry in os.listdir(folder))
    # Extension match is case-insensitive; original filename case is kept.
    return sorted(p for p in joined if os.path.splitext(p.lower())[1] in supported)
188
+
189
+
190
def load_dataset(sr_dir: str, gt_dir: str) -> List[Sample]:
    """Pair SR and GT images by shared basename (extension ignored).

    Raises RuntimeError when no pair matches, since the study cannot run
    without at least one sample.
    """
    def stem(path: str) -> str:
        # "dir/foo.png" -> "foo"
        return os.path.splitext(os.path.basename(path))[0]

    gt_by_stem = {stem(p): p for p in list_image_files(gt_dir)}
    samples = [
        Sample(sample_id=stem(p), sr_path=p, gt_path=gt_by_stem[stem(p)])
        for p in list_image_files(sr_dir)
        if stem(p) in gt_by_stem
    ]
    if not samples:
        raise RuntimeError(
            f"No matched SR/GT pairs found. Ensure filenames match in {sr_dir} and {gt_dir}."
        )
    return samples
210
+
211
+
212
+ # ----------------------
213
+ # Progress & results I/O
214
+ # ----------------------
215
+
216
def hash_user_id(name: str, address: str) -> str:
    """Derive a short, stable pseudonymous ID from name + address.

    Normalization (strip + lowercase) makes the ID robust to casing and
    stray whitespace; None behaves like "".
    """
    parts = [(name or "").strip().lower(), (address or "").strip().lower()]
    digest = hashlib.sha256("|".join(parts).encode("utf-8"))
    return digest.hexdigest()[:16]
219
+
220
+
221
def load_progress() -> Dict[str, Dict[str, Any]]:
    """Read the per-user progress map; any failure degrades to {}.

    Map shape: {user_id: {"seen": [sample_id, ...]}}.
    """
    try:
        with open(PROGRESS_PATH, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except Exception:
        # Missing or corrupt file -> start fresh rather than crash.
        return {}
229
+
230
+
231
def save_progress(progress: Dict[str, Dict[str, Any]]):
    """Overwrite progress.json with the given per-user progress map.

    WRITE_LOCK serializes writers within this process only; cross-process
    coordination is handled elsewhere via file locks.
    """
    serialized = json.dumps(progress, ensure_ascii=False, indent=2)
    with WRITE_LOCK, open(PROGRESS_PATH, "w", encoding="utf-8") as fh:
        fh.write(serialized)
235
+
236
+
237
def append_jsonl(path: str, record: Dict[str, Any]):
    """Append `record` as one JSON line to `path` (created if absent).

    Serialization happens before taking the lock to keep the critical
    section as short as possible.
    """
    serialized = json.dumps(record, ensure_ascii=False)
    with WRITE_LOCK, open(path, "a", encoding="utf-8") as fh:
        fh.write(serialized + "\n")
242
+
243
+
244
+ # ----------------------
245
+ # Image utilities
246
+ # ----------------------
247
+
248
def load_image(path: str) -> Image.Image:
    """Load an image from disk, forcing 3-channel RGB (drops alpha/palette)."""
    img = Image.open(path).convert("RGB")
    return img
251
+
252
+
253
def draw_grid_overlay(
    base_img: Image.Image,
    tiles_selected: List[int] | None,
    rows: int = TILE_ROWS,
    cols: int = TILE_COLS,
    show_numbers: bool = True,
) -> Image.Image:
    """Draw a grid and (optionally) a semi-transparent overlay over selected tiles.

    tiles_selected: list of 1-based tile indices, numbered row-major
    (1..rows*cols, left-to-right then top-to-bottom).
    Returns a new RGB image; the input is never modified in place.
    """
    # Ensure PIL Image (Gradio may pass numpy arrays)
    if isinstance(base_img, np.ndarray):
        arr = base_img
        if arr.dtype != np.uint8:
            # Normalize float [0,1] to [0,255] or clip other ranges
            if arr.max() <= 1.0:
                arr = (arr * 255).clip(0, 255).astype(np.uint8)
            else:
                arr = np.clip(arr, 0, 255).astype(np.uint8)
        if arr.ndim == 2:
            # Grayscale array -> promote to RGB so RGBA drawing works below.
            img = Image.fromarray(arr, mode="L").convert("RGB")
        else:
            img = Image.fromarray(arr).convert("RGB")
    else:
        # Copy so the caller's image is left untouched.
        img = base_img.copy()
    W, H = img.size
    # "RGBA" draw mode enables alpha-blended fills on the RGB image.
    draw = ImageDraw.Draw(img, "RGBA")

    # grid lines (interior only: rows-1 horizontal, cols-1 vertical)
    for r in range(1, rows):
        y = int(H * r / rows)
        draw.line([(0, y), (W, y)], fill=(255, 255, 255, 180), width=2)
    for c in range(1, cols):
        x = int(W * c / cols)
        draw.line([(x, 0), (x, H)], fill=(255, 255, 255, 180), width=2)

    # overlay selected tiles with translucent red
    tiles_selected = tiles_selected or []
    for idx in tiles_selected:
        idx0 = idx - 1          # convert 1-based tile number to 0-based
        r = idx0 // cols
        c = idx0 % cols
        x0 = int(W * c / cols)
        y0 = int(H * r / rows)
        x1 = int(W * (c + 1) / cols)
        y1 = int(H * (r + 1) / rows)
        draw.rectangle([(x0, y0), (x1, y1)], fill=(255, 0, 0, 70), outline=(255, 0, 0, 160), width=3)

    if show_numbers:
        try:
            font = ImageFont.load_default()
        except Exception:
            # Fall back to PIL's implicit default font handling.
            font = None
        for i in range(rows * cols):
            idx = i + 1
            r = i // cols
            c = i % cols
            x_center = int(W * (c + 0.5) / cols)
            y_center = int(H * (r + 0.5) / rows)
            text = str(idx)
            # Dark backing box behind each number for readability;
            # anchor="mm" centers the text on the tile midpoint.
            bbox = draw.textbbox((x_center, y_center), text, font=font, anchor="mm")
            draw.rectangle(bbox, fill=(0, 0, 0, 110))
            draw.text((x_center, y_center), text, fill=(255, 255, 255, 230), anchor="mm", font=font)

    return img
318
+
319
+
320
+ # ----------------------
321
+ # App state helpers
322
+ # ----------------------
323
+
324
# Wizard step identifiers. These are absolute ids; the visible sequence
# comes from allowed_steps(), which omits STEP_TILES unless detection == "Yes".
STEP_DETECTION = 1
STEP_CONFIDENCE = 2
STEP_TILES = 3
STEP_CLARITY = 4
STEP_REASONING = 5
329
+
330
+
331
def pick_next_index(user_seen: List[str], samples: List[Sample]) -> int:
    """Pick the index of a random unseen sample; any index once all are seen."""
    seen = set(user_seen)
    unseen = [i for i, smp in enumerate(samples) if smp.sample_id not in seen]
    if unseen:
        return random.choice(unseen)
    # Everything was seen: repeats are allowed, pick uniformly.
    return random.randrange(len(samples))
338
+
339
+
340
def get_test_image_path(sample: Sample) -> str:
    """Path of the image shown for detection, chosen by TEST_IMAGE_SOURCE."""
    if TEST_IMAGE_SOURCE == "sr":
        return sample.sr_path
    return sample.gt_path
342
+
343
+
344
def allowed_steps(detect_choice: str | None) -> List[int]:
    """Ordered wizard steps; the tile step only appears after a 'Yes' detection."""
    tile_part = [STEP_TILES] if detect_choice == "Yes" else []
    return [STEP_DETECTION, STEP_CONFIDENCE, *tile_part, STEP_CLARITY, STEP_REASONING]
350
+
351
+
352
def step_display_index(step: int, detect_choice: str | None) -> tuple[int, int]:
    """Map an absolute step id to (1-based position, total) in the allowed sequence."""
    seq = allowed_steps(detect_choice)
    if step not in seq:
        # The tiles step vanishes when detection != "Yes"; land on clarity.
        step = STEP_CLARITY
    return seq.index(step) + 1, len(seq)
360
+
361
+
362
+ # ----------------------
363
+ # Gradio callbacks (core)
364
+ # ----------------------
365
+
366
def start_or_resume(name: str, address: str):
    """Start (or resume) a session: reserve a sample and populate the UI.

    Returns a tuple matching the `outputs=` list wired to this callback:
    states, images, status text, per-user results path, and visibility updates.
    """
    if not name or not address:
        raise gr.Error("Please enter your name and address to begin.")

    ensure_paths()
    samples = load_dataset(SR_DIR, GT_DIR)

    uid = hash_user_id(name, address)
    progress = load_progress()
    if uid not in progress:
        # First visit for this user: create an empty progress entry.
        progress[uid] = {"seen": []}
        save_progress(progress)

    user_seen: List[str] = progress[uid].get("seen", [])

    # Globally coordinated pick (reservation + round state) so concurrent
    # users are not handed the same sample.
    idx = reserve_next_sample(uid, samples)
    sample = samples[idx]

    # Randomize A/B mapping so raters cannot learn which side is SR.
    a_is_sr = bool(random.getrandbits(1))

    # Prepare images
    test_img = load_image(get_test_image_path(sample))
    overlay_img = draw_grid_overlay(test_img, [])

    img_a = load_image(sample.sr_path if a_is_sr else sample.gt_path)
    img_b = load_image(sample.gt_path if a_is_sr else sample.sr_path)

    rnum, left = get_round_and_left(len(samples))
    status = (
        f"Welcome, {name}. Global round {rnum} — images left in this round: {left}.\n"
        f"Current sample: {sample.sample_id}"
    )

    os.makedirs(RESULTS_DIR, exist_ok=True)
    user_file_path = os.path.join(RESULTS_DIR, f"{uid}.jsonl")

    return (
        uid, samples, user_seen, idx, a_is_sr,
        test_img, overlay_img, img_a, img_b, status,
        user_file_path,
        gr.update(visible=True),   # reveal the evaluation panel
        [],                        # reset tile selections
        1,                         # current step -> detection
        gr.update(visible=False),  # hide intro
        gr.update(visible=False),  # hide start form
    )
421
+
422
+
423
def update_overlay(test_img: Image.Image, selected_tiles: List[str] | None):
    """Re-render the tile grid overlay whenever the checkbox selection changes."""
    # Checkbox values arrive as strings ("1".."20"); normalize to sorted ints.
    chosen = sorted(int(t) for t in (selected_tiles or []))
    if test_img is None:
        return None
    return draw_grid_overlay(test_img, chosen)
428
+
429
+
430
def _save_record_and_progress(
    name: str,
    address: str,
    uid: str,
    samples: List[Sample],
    user_seen: List[str],
    idx: int,
    a_is_sr: bool,
    detection_choice: str,
    confidence: int,
    selected_tiles: List[str] | None,
    clarity_choice: str,
    reasoning: str,
):
    """Common save logic used by both 'finish' and 'next image' paths.

    Appends one record to the per-user JSONL and the global JSONL, then marks
    the sample as seen for this user. Returns the updated progress map.
    """
    if not name or not address:
        raise gr.Error("Please enter your name and address.")
    # Coerce unexpected widget values instead of rejecting the submission.
    if detection_choice not in {"Yes", "No", "Unsure"}:
        detection_choice = "Unsure"
    valid_clarity = {
        "Image A is much clearer",
        "Image A is slightly clearer",
        "Both are about the same",
        "Image B is slightly clearer",
        "Image B is much clearer",
    }
    if clarity_choice not in valid_clarity:
        clarity_choice = None

    confidence = int(confidence)
    # Checkbox values are strings; persist sorted ints.
    selected_tiles_int = sorted([int(x) for x in (selected_tiles or [])])
    sample = samples[idx]

    record: Dict[str, Any] = {
        # UTC timestamp; "Z" suffix marks it as such.
        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "user_id": uid,
        # PII is stored only when SAVE_PII is enabled.
        "name": name if SAVE_PII else None,
        "address": address if SAVE_PII else None,
        "sample_id": sample.sample_id,
        "sr_path": sample.sr_path,
        "gt_path": sample.gt_path,
        "test_image_source": TEST_IMAGE_SOURCE,
        # Record which condition each blinded label actually showed.
        "ab_mapping": {
            "image_a": "SR" if a_is_sr else "GT",
            "image_b": "GT" if a_is_sr else "SR",
        },
        "responses": {
            "contains_rts": detection_choice,
            "confidence": confidence,
            # Tiles are only meaningful for a "Yes" answer.
            "selected_tiles": selected_tiles_int if detection_choice == "Yes" else [],
            "clarity": clarity_choice,
            "reasoning": reasoning or "",
        },
    }

    os.makedirs(RESULTS_DIR, exist_ok=True)
    append_jsonl(os.path.join(RESULTS_DIR, f"{uid}.jsonl"), record)
    append_jsonl(ALL_RESULTS_JSONL, record)

    # Update per-user "seen" list (kept sorted and duplicate-free).
    progress = load_progress()
    progress.setdefault(uid, {"seen": []})
    seen = set(progress[uid].get("seen", []))
    if sample.sample_id not in seen:
        seen.add(sample.sample_id)
    progress[uid]["seen"] = sorted(list(seen))
    save_progress(progress)

    return progress
498
+
499
+
500
def submit_finish(
    name: str,
    address: str,
    uid: str,
    samples: List[Sample],
    user_seen: List[str],
    idx: int,
    a_is_sr: bool,
    detection_choice: str,
    confidence: int,
    selected_tiles: List[str] | None,
    clarity_choice: str,
    reasoning: str,
):
    """Save the current answers and end the session (no auto-next).

    Returns updated state plus cleared widget values; the chained
    `to_thanks` handler hides the evaluation panel afterwards.
    """
    progress = _save_record_and_progress(
        name, address, uid, samples, user_seen, idx, a_is_sr,
        detection_choice, confidence, selected_tiles, clarity_choice, reasoning
    )

    sample = samples[idx]
    # Mark the sample globally done for this round and free its reservation.
    round_num, left, rolled = mark_done_and_maybe_rollover(sample.sample_id, len(samples))
    release_reservation(sample.sample_id)

    if rolled:
        status = f"Saved! Global round {round_num-1} complete — starting Round {round_num}."
    else:
        status = f"Saved! Global round {round_num} — images left: {left}."

    # Load the test image once and reuse it for the plain view and the
    # overlay (previously it was read from disk twice).
    test_image = load_image(get_test_image_path(sample))

    # Return current state (eval panel will be hidden by to_thanks)
    return (
        progress[uid]["seen"],
        idx,
        a_is_sr,
        test_image,
        draw_grid_overlay(test_image, []),
        load_image(sample.sr_path if a_is_sr else sample.gt_path),
        load_image(sample.gt_path if a_is_sr else sample.sr_path),
        status,
        [],                     # clear tile selection
        gr.update(value=""),    # clear reasoning textbox
        gr.update(value=None),  # clear detection radio
        gr.update(value=3),     # reset confidence slider to its default
        gr.update(value=None),  # clear clarity radio
    )
548
+
549
+
550
def submit_next_image(
    name: str,
    address: str,
    uid: str,
    samples: List[Sample],
    user_seen: List[str],
    idx: int,
    a_is_sr: bool,
    detection_choice: str,
    confidence: int,
    selected_tiles: List[str] | None,
    clarity_choice: str,
    reasoning: str,
):
    """Save the current answers, then reserve and present the next sample.

    Returns the refreshed state/widgets plus `1` to reset the wizard step.
    (Removed: a dead per-user `remaining_after` computation whose result was
    unconditionally overwritten before use.)
    """
    progress = _save_record_and_progress(
        name, address, uid, samples, user_seen, idx, a_is_sr,
        detection_choice, confidence, selected_tiles, clarity_choice, reasoning
    )
    # Mark global done & release the current reservation.
    current_sample = samples[idx]
    round_num, _, rolled = mark_done_and_maybe_rollover(current_sample.sample_id, len(samples))
    release_reservation(current_sample.sample_id)

    # Reserve the next globally available sample.
    idx_next = reserve_next_sample(uid, samples)
    sample_next = samples[idx_next]

    # Fresh random A/B mapping for the new sample.
    a_is_sr_next = bool(random.getrandbits(1))

    test_img_next = load_image(get_test_image_path(sample_next))
    overlay_img_next = draw_grid_overlay(test_img_next, [])
    img_a_next = load_image(sample_next.sr_path if a_is_sr_next else sample_next.gt_path)
    img_b_next = load_image(sample_next.gt_path if a_is_sr_next else sample_next.sr_path)

    rnum, left_after = get_round_and_left(len(samples))

    round_reset_note = ""
    if rolled:
        round_reset_note = f" Round {round_num-1} complete — starting Round {round_num}."

    status = (
        f"Saved! Global round {rnum} — images left: {left_after}.{round_reset_note}\n"
        f"Next sample: {sample_next.sample_id}"
    )

    return (
        progress[uid]["seen"],
        idx_next,
        a_is_sr_next,
        test_img_next,
        overlay_img_next,
        img_a_next,
        img_b_next,
        status,
        [],                     # clear tile selection
        gr.update(value=""),    # clear reasoning textbox
        gr.update(value=None),  # clear detection radio
        gr.update(value=3),     # reset confidence slider to its default
        gr.update(value=None),  # clear clarity radio
        1,                      # wizard back to step 1
    )
628
+
629
+
630
+ # ----------------------
631
+ # Wizard helpers (visibility + navigation)
632
+ # ----------------------
633
+
634
def step_label(step: int, detect_choice: str | None) -> str:
    """Markdown header for the wizard, e.g. '**Step 2 of 5 — Confidence**'."""
    names = {
        STEP_DETECTION: "RTS detection",
        STEP_CONFIDENCE: "Confidence",
        STEP_TILES: "Select tiles",
        STEP_CLARITY: "A/B clarity",
        STEP_REASONING: "Reasoning",
    }
    pos, total = step_display_index(step, detect_choice)
    return f"**Step {pos} of {total} — {names.get(step, '')}**"
644
+
645
+
646
def apply_step_visibility(current_step: int, detect_choice: str | None):
    """Compute gr.update() visibility for every wizard group and nav button."""
    seq = allowed_steps(detect_choice)
    if current_step not in seq:
        # e.g. sitting on the tiles step after detection flipped away from "Yes"
        current_step = STEP_CLARITY

    visible = {
        STEP_DETECTION: current_step == STEP_DETECTION,
        STEP_CONFIDENCE: current_step == STEP_CONFIDENCE,
        STEP_TILES: current_step == STEP_TILES and detect_choice == "Yes",
        STEP_CLARITY: current_step == STEP_CLARITY,
        STEP_REASONING: current_step == STEP_REASONING,
    }

    pos, total = step_display_index(current_step, detect_choice)
    return (
        gr.update(value=step_label(current_step, detect_choice)),
        gr.update(visible=visible[STEP_DETECTION]),
        gr.update(visible=visible[STEP_CONFIDENCE]),
        gr.update(visible=visible[STEP_TILES]),
        gr.update(visible=visible[STEP_CLARITY]),
        gr.update(visible=visible[STEP_REASONING]),
        gr.update(visible=pos > 1),      # Back hidden on the first step
        gr.update(visible=pos < total),  # Next hidden on the last (submit lives there)
    )
674
+
675
+
676
def go_next_step(current_step: int, detect_choice: str | None, clarity_choice: str | None):
    """Validate the current step's answer, then advance within the allowed sequence."""
    valid_clarity = {
        "Image A is much clearer",
        "Image A is slightly clearer",
        "Both are about the same",
        "Image B is slightly clearer",
        "Image B is much clearer",
    }
    # Required answers are enforced before leaving their step.
    if current_step == STEP_DETECTION and detect_choice not in {"Yes", "No", "Unsure"}:
        raise gr.Error("Please answer the RTS detection question before continuing.")
    if current_step == STEP_CLARITY and clarity_choice not in valid_clarity:
        raise gr.Error("Please select your A/B clarity rating before continuing.")

    seq = allowed_steps(detect_choice)
    pos = seq.index(current_step) if current_step in seq else 0
    # Clamp at the last step instead of overrunning the sequence.
    return seq[min(pos + 1, len(seq) - 1)]
695
+
696
+
697
def go_prev_step(current_step: int, detect_choice: str | None):
    """Step backwards within the allowed sequence, clamped at the first step."""
    seq = allowed_steps(detect_choice)
    pos = seq.index(current_step) if current_step in seq else 0
    return seq[max(pos - 1, 0)]
704
+
705
+
706
def reset_to_step1():
    """Rewind the wizard state to its first step (detection)."""
    return 1
708
+
709
+
710
+ # --- Thank-you / restart helpers ---
711
def to_thanks(name: str):
    """Hide eval panel and show the thank-you screen with a personalized message.

    Returns updates for (eval_panel, thanks_group, thanks_md) in that order.
    """
    msg = f"### ✅ Thanks, {name}! Your responses were saved.\n\nClick **Start Again** to evaluate another image."
    return gr.update(visible=False), gr.update(visible=True), gr.update(value=msg)
715
+
716
+
717
def hide_thanks():
    """Hide the thank-you screen (used when starting/resuming a session)."""
    return gr.update(visible=False)
720
+
721
+
722
+ # ----------------------
723
+ # UI
724
+ # ----------------------
725
# UI layout + event wiring. Fix: the `apply_step_visibility` follow-up was
# registered twice on the "Submit & Next Image" event; once is sufficient.
with gr.Blocks(title="RTS Human Evaluation", theme=gr.themes.Soft()) as demo:
    # ---- Static intro ----
    intro_md = gr.Markdown(
        """
# Retrogressive Thaw Slump (RTS) Human Evaluation

**Instructions:**
1. Enter your name and address, then click **Start / Resume**.
2. Use **Next** and **Back** to go through the questions.
3. On the **last step**, choose **Submit & Next Image** to continue, or **Submit & Finish** to end and see a Thank You screen (with **Start Again**).
"""
    )

    # ---- Hidden per-session state ----
    state_uid = gr.State("")
    state_samples = gr.State([])
    state_seen = gr.State([])
    state_idx = gr.State(-1)
    state_a_is_sr = gr.State(True)
    state_step = gr.State(1)  # wizard step

    # ---- Start form ----
    with gr.Group() as start_group:
        with gr.Row():
            name = gr.Textbox(label="Full name", placeholder="Jane Doe", autofocus=True)
            address = gr.Textbox(label="Address", placeholder="123 Pine St, City / or Email")
        start_btn = gr.Button("Start / Resume", variant="primary")
    status = gr.Markdown("\n")

    # ---- Evaluation wizard (revealed after Start) ----
    eval_panel = gr.Group(visible=False)
    with eval_panel:
        step_status = gr.Markdown(visible=True)

        # Step 1 — Detection
        group_detection = gr.Group(visible=False)
        with group_detection:
            with gr.Row():
                test_img = gr.Image(label="Test Image", interactive=False, type="pil")
                detect = gr.Radio(
                    label="Does this image contain a Retrogressive Thaw Slump?",
                    choices=["Yes", "No", "Unsure"],
                )

        # Step 2 — Confidence
        group_confidence = gr.Group(visible=False)
        with group_confidence:
            conf = gr.Slider(1, 5, value=3, step=1, label="Confidence (1=low, 5=high)")

        # Step 3 — Tiles (only shown when detect == "Yes")
        group_tiles = gr.Group(visible=False)
        with group_tiles:
            gr.Markdown(
                f"If you answered **Yes**, select tiles around the main RTS features (grid {TILE_ROWS}×{TILE_COLS} = {N_TILES})."
            )
            with gr.Row():
                overlay_img = gr.Image(label="Tile Overlay (selected in red)", interactive=False)
                tile_choices = [str(i) for i in range(1, N_TILES + 1)]
                tiles = gr.CheckboxGroup(
                    choices=tile_choices,
                    label="Select tiles",
                )
            tiles.change(update_overlay, inputs=[test_img, tiles], outputs=[overlay_img])

        # Step 4 — A/B clarity
        group_clarity = gr.Group(visible=False)
        with group_clarity:
            gr.Markdown("Which image provides a clearer overall view of the geological features?")
            with gr.Row():
                image_a = gr.Image(label="Image A", interactive=False)
                image_b = gr.Image(label="Image B", interactive=False)
            clarity = gr.Radio(
                label="Clarity (A vs B)",
                choices=[
                    "Image A is much clearer",
                    "Image A is slightly clearer",
                    "Both are about the same",
                    "Image B is slightly clearer",
                    "Image B is much clearer",
                ],
            )

        # Step 5 — Reasoning + Submit (submit buttons live only here)
        group_reasoning = gr.Group(visible=False)
        with group_reasoning:
            reasoning = gr.Textbox(label="Briefly explain your reasoning (artifacts, enhancements, etc.)", lines=3)
            with gr.Row():
                submit_next_btn = gr.Button("Submit & Next Image", variant="secondary")
                submit_finish_btn = gr.Button("Submit & Finish", variant="primary")
            your_jsonl_path = gr.Textbox(label="Your results file path (for reference)", interactive=False)

        # Nav row (no global Submit here)
        with gr.Row():
            back_btn = gr.Button("Back")
            next_btn = gr.Button("Next", variant="secondary")

    # ---- Thank You screen ----
    with gr.Group(visible=False) as thanks_group:
        thanks_md = gr.Markdown("### ✅ Thanks! Your responses were saved.\n\nClick **Start Again** to evaluate another image.")
        restart_btn = gr.Button("Start Again", variant="primary")

    # ---- Wiring ----
    start_event = start_btn.click(
        start_or_resume,
        inputs=[name, address],
        outputs=[
            state_uid, state_samples, state_seen, state_idx, state_a_is_sr,
            test_img, overlay_img, image_a, image_b, status, your_jsonl_path, eval_panel, tiles, state_step, intro_md, start_group
        ],
    )
    # Hide Thank You (if it was open) and render step-1 visibility.
    start_event.then(hide_thanks, inputs=None, outputs=[thanks_group])
    start_event.then(
        apply_step_visibility,
        inputs=[state_step, detect],
        outputs=[step_status, group_detection, group_confidence, group_tiles, group_clarity, group_reasoning, back_btn, next_btn],
    )

    # Navigation: Next (validates, then refreshes visibility)
    next_click = next_btn.click(
        go_next_step,
        inputs=[state_step, detect, clarity],
        outputs=[state_step],
    )
    next_click.then(
        apply_step_visibility,
        inputs=[state_step, detect],
        outputs=[step_status, group_detection, group_confidence, group_tiles, group_clarity, group_reasoning, back_btn, next_btn],
    )

    # Navigation: Back
    back_click = back_btn.click(
        go_prev_step,
        inputs=[state_step, detect],
        outputs=[state_step],
    )
    back_click.then(
        apply_step_visibility,
        inputs=[state_step, detect],
        outputs=[step_status, group_detection, group_confidence, group_tiles, group_clarity, group_reasoning, back_btn, next_btn],
    )

    # Submit & Finish -> save, then show Thank You
    finish_event = submit_finish_btn.click(
        submit_finish,
        inputs=[
            name, address, state_uid, state_samples, state_seen, state_idx,
            state_a_is_sr, detect, conf, tiles, clarity, reasoning,
        ],
        outputs=[
            state_seen, state_idx, state_a_is_sr, test_img, overlay_img,
            image_a, image_b, status, tiles, reasoning, detect, conf, clarity,
        ],
    )
    finish_event.then(
        to_thanks,
        inputs=[name],
        outputs=[eval_panel, thanks_group, thanks_md],
    )

    # Submit & Next Image -> save, load next image, reset to step 1
    nextimg_event = submit_next_btn.click(
        submit_next_image,
        inputs=[
            name, address, state_uid, state_samples, state_seen, state_idx,
            state_a_is_sr, detect, conf, tiles, clarity, reasoning,
        ],
        outputs=[
            state_seen, state_idx, state_a_is_sr, test_img, overlay_img,
            image_a, image_b, status, tiles, reasoning, detect, conf, clarity,
            state_step,
        ],
    )
    # Single visibility refresh after the new sample loads (was duplicated).
    nextimg_event.then(
        apply_step_visibility,
        inputs=[state_step, detect],
        outputs=[step_status, group_detection, group_confidence, group_tiles, group_clarity, group_reasoning, back_btn, next_btn],
    )

    # Restart: start again using the current name/address
    restart_event = restart_btn.click(
        start_or_resume,
        inputs=[name, address],
        outputs=[
            state_uid, state_samples, state_seen, state_idx, state_a_is_sr,
            test_img, overlay_img, image_a, image_b, status, your_jsonl_path, eval_panel, tiles, state_step, intro_md, start_group
        ],
    )
    restart_event.then(hide_thanks, inputs=None, outputs=[thanks_group])
    restart_event.then(
        apply_step_visibility,
        inputs=[state_step, detect],
        outputs=[step_status, group_detection, group_confidence, group_tiles, group_clarity, group_reasoning, back_btn, next_btn],
    )
965
+
966
+
967
if __name__ == "__main__":
    # Fail fast on missing/mismatched data directories before serving.
    ensure_paths()
    _ = load_dataset(SR_DIR, GT_DIR)
    demo.queue()   # enable request queuing for concurrent annotators
    demo.launch()