Numan Saeed committed on
Commit
cbd23a5
·
1 Parent(s): f84688d

View-aware GA with WHO biometry formulas

Browse files
backend/app/routes/__pycache__/gestational_age.cpython-310.pyc CHANGED
Binary files a/backend/app/routes/__pycache__/gestational_age.cpython-310.pyc and b/backend/app/routes/__pycache__/gestational_age.cpython-310.pyc differ
 
backend/app/routes/gestational_age.py CHANGED
@@ -1,14 +1,23 @@
1
  from fastapi import APIRouter, UploadFile, File, Query, HTTPException
2
  from fastapi.responses import JSONResponse
 
3
  from ..services.model import model_service
4
 
5
  router = APIRouter(prefix="/api/v1/gestational-age", tags=["Gestational Age"])
6
 
 
 
 
 
 
 
 
7
 
8
  @router.post("/")
9
  async def estimate_gestational_age(
10
  file: UploadFile = File(..., description="Fetal brain ultrasound file (DICOM or image)"),
11
- pixel_size: float = Query(default=0.1, ge=0.01, le=1.0, description="Pixel size in mm/pixel")
 
12
  ):
13
  """
14
  Estimate gestational age from fetal brain ultrasound.
@@ -19,8 +28,18 @@ async def estimate_gestational_age(
19
  For DICOM files, pixel spacing is automatically extracted from metadata.
20
  For image files, you must provide the pixel_size parameter.
21
 
 
 
 
22
  Returns estimated gestational age and head circumference percentiles.
23
  """
 
 
 
 
 
 
 
24
  try:
25
  # Read file bytes
26
  contents = await file.read()
@@ -28,14 +47,19 @@ async def estimate_gestational_age(
28
 
29
  # Estimate GA with automatic preprocessing
30
  ga_results, preprocessing_info = model_service.estimate_ga_from_file(
31
- contents, filename, pixel_size=pixel_size
32
  )
33
 
34
- return JSONResponse(content={
 
35
  "success": True,
36
  **ga_results,
 
37
  "preprocessing": preprocessing_info
38
- })
 
 
39
 
40
  except Exception as e:
41
  raise HTTPException(status_code=500, detail=str(e))
 
 
1
  from fastapi import APIRouter, UploadFile, File, Query, HTTPException
2
  from fastapi.responses import JSONResponse
3
+ from typing import Optional
4
  from ..services.model import model_service
5
 
6
  router = APIRouter(prefix="/api/v1/gestational-age", tags=["Gestational Age"])
7
 
8
+ # Views that support GA estimation (match keys from prompt_fetal_view.json)
9
+ GA_ELIGIBLE_VIEWS = [
10
+ "brain", # Head Circumference
11
+ "abdomen", # Abdominal Circumference
12
+ "femur", # Femur Length
13
+ ]
14
+
15
 
16
  @router.post("/")
17
  async def estimate_gestational_age(
18
  file: UploadFile = File(..., description="Fetal brain ultrasound file (DICOM or image)"),
19
+ pixel_size: float = Query(default=0.1, ge=0.01, le=1.0, description="Pixel size in mm/pixel"),
20
+ view: Optional[str] = Query(default=None, description="Anatomical view of the ultrasound")
21
  ):
22
  """
23
  Estimate gestational age from fetal ultrasound (brain, abdomen, or femur view).
 
28
  For DICOM files, pixel spacing is automatically extracted from metadata.
29
  For image files, you must provide the pixel_size parameter.
30
 
31
+ The view parameter is optional but recommended. If provided, it will be
32
+ validated to ensure it's a view suitable for GA estimation (brain, abdomen, or femur).
33
+
34
  Returns estimated gestational age and view-specific biometry percentiles.
35
  """
36
+ # Validate view if provided
37
+ if view and view not in GA_ELIGIBLE_VIEWS:
38
+ raise HTTPException(
39
+ status_code=400,
40
+ detail=f"View '{view}' is not supported for GA estimation. Supported views: {GA_ELIGIBLE_VIEWS}"
41
+ )
42
+
43
  try:
44
  # Read file bytes
45
  contents = await file.read()
 
47
 
48
  # Estimate GA with automatic preprocessing
49
  ga_results, preprocessing_info = model_service.estimate_ga_from_file(
50
+ contents, filename, pixel_size=pixel_size, view=view or "brain"
51
  )
52
 
53
+ # Add view info to response
54
+ result = {
55
  "success": True,
56
  **ga_results,
57
+ "view": view,
58
  "preprocessing": preprocessing_info
59
+ }
60
+
61
+ return JSONResponse(content=result)
62
 
63
  except Exception as e:
64
  raise HTTPException(status_code=500, detail=str(e))
65
+
backend/app/services/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/backend/app/services/__pycache__/model.cpython-310.pyc and b/backend/app/services/__pycache__/model.cpython-310.pyc differ
 
backend/app/services/__pycache__/preprocessing.cpython-310.pyc CHANGED
Binary files a/backend/app/services/__pycache__/preprocessing.cpython-310.pyc and b/backend/app/services/__pycache__/preprocessing.cpython-310.pyc differ
 
backend/app/services/model.py CHANGED
@@ -14,14 +14,30 @@ MODEL_NAME = "numansaeed/fetalclip-model"
14
  INPUT_SIZE = 224
15
  TOP_N_PROBS = 15
16
 
17
- # GA Text prompts
18
- GA_TEXT_PROMPTS = [
19
- "Ultrasound image at {weeks} weeks and {day} days gestation focusing on the fetal brain, highlighting anatomical structures with a pixel spacing of {pixel_spacing} mm/pixel.",
20
- "Fetal ultrasound image at {weeks} weeks, {day} days of gestation, focusing on the developing brain, with a pixel spacing of {pixel_spacing} mm/pixel, highlighting the structures of the fetal brain.",
21
- "Fetal ultrasound image at {weeks} weeks and {day} days gestational age, highlighting the developing brain structures with a pixel spacing of {pixel_spacing} mm/pixel, providing important visual insights for ongoing prenatal assessments.",
22
- "Ultrasound image at {weeks} weeks and {day} days gestation, highlighting the fetal brain structures with a pixel spacing of {pixel_spacing} mm/pixel.",
23
- "Fetal ultrasound at {weeks} weeks and {day} days, showing a clear view of the developing brain, with an image pixel spacing of {pixel_spacing} mm/pixel."
24
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  LIST_GA_IN_DAYS = [weeks * 7 + days for weeks in range(14, 39) for days in range(0, 7)]
27
 
@@ -182,19 +198,55 @@ class FetalCLIPService:
182
  tmp = sorted(tmp, key=lambda x: x[0])
183
  return tmp[n // 2][0]
184
 
185
- def _get_hc_from_days(self, t: int, quartile: str = '0.5') -> float:
186
- """Calculate head circumference from gestational age."""
187
- t = t / 7
188
- params = {
189
- '0.025': [1.59317517131532e+0, 2.9459800552433e-1, -7.3860372566707e-3, 6.56951770216148e-5, 0e+0],
190
- '0.500': [2.09924879247164e+0, 2.53373656106037e-1, -6.05647816678282e-3, 5.14256072059917e-5, 0e+0],
191
- '0.975': [2.50074069629423e+0, 2.20067854715719e-1, -4.93623111462443e-3, 3.89066000946519e-5, 0e+0],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
  }
193
- b0, b1, b2, b3, b4 = params[quartile]
194
- return np.exp(b0 + b1*t + b2*t**2 + b3*t**3 + b4*t**4)
 
 
 
 
 
 
195
 
196
- def estimate_gestational_age(self, image: Image.Image, pixel_size: float) -> Dict:
197
- """Estimate gestational age from preprocessed fetal brain ultrasound."""
198
  if self.model is None:
199
  raise RuntimeError("Model not loaded. Call load_model() first.")
200
 
@@ -209,10 +261,11 @@ class FetalCLIPService:
209
  image_features = self.model.encode_image(img_tensor)
210
  image_features /= image_features.norm(dim=-1, keepdim=True)
211
 
212
- # Get text features for all prompts
 
213
  text_features_list = [
214
  self._get_ga_text_features(template, pixel_spacing)
215
- for template in GA_TEXT_PROMPTS
216
  ]
217
 
218
  text_dot_prods = self._get_unnormalized_dot_products(image_features, text_features_list)
@@ -225,10 +278,33 @@ class FetalCLIPService:
225
  pred_weeks = pred_day // 7
226
  pred_days = pred_day % 7
227
 
228
- # Compute HC percentiles
229
- q025 = self._get_hc_from_days(pred_day, '0.025')
230
- q500 = self._get_hc_from_days(pred_day, '0.500')
231
- q975 = self._get_hc_from_days(pred_day, '0.975')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
 
233
  return {
234
  "gestational_age": {
@@ -236,14 +312,15 @@ class FetalCLIPService:
236
  "days": pred_days,
237
  "total_days": pred_day
238
  },
239
- "head_circumference": {
 
240
  "p2_5": round(q025, 2),
241
  "p50": round(q500, 2),
242
  "p97_5": round(q975, 2)
243
  }
244
  }
245
 
246
- def estimate_ga_from_file(self, file_bytes: bytes, filename: str, pixel_size: float) -> Tuple[Dict, Dict]:
247
  """
248
  Estimate GA from raw file bytes with automatic preprocessing.
249
 
@@ -257,8 +334,8 @@ class FetalCLIPService:
257
  if preprocessing_info["type"] == "dicom":
258
  pixel_size = preprocessing_info["metadata"].get("pixel_spacing", pixel_size)
259
 
260
- # Estimate GA
261
- ga_results = self.estimate_gestational_age(processed_image, pixel_size)
262
 
263
  return ga_results, preprocessing_info
264
 
 
14
  INPUT_SIZE = 224
15
  TOP_N_PROBS = 15
16
 
17
+ # GA Text prompts - view-specific prompts for brain, abdomen, and femur
18
+ GA_TEXT_PROMPTS = {
19
+ "brain": [
20
+ "Ultrasound image at {weeks} weeks and {day} days gestation focusing on the fetal brain, highlighting anatomical structures with a pixel spacing of {pixel_spacing} mm/pixel.",
21
+ "Fetal ultrasound image at {weeks} weeks, {day} days of gestation, focusing on the developing brain, with a pixel spacing of {pixel_spacing} mm/pixel, highlighting the structures of the fetal brain.",
22
+ "Fetal ultrasound image at {weeks} weeks and {day} days gestational age, highlighting the developing brain structures with a pixel spacing of {pixel_spacing} mm/pixel, providing important visual insights for ongoing prenatal assessments.",
23
+ "Ultrasound image at {weeks} weeks and {day} days gestation, highlighting the fetal brain structures with a pixel spacing of {pixel_spacing} mm/pixel.",
24
+ "Fetal ultrasound at {weeks} weeks and {day} days, showing a clear view of the developing brain, with an image pixel spacing of {pixel_spacing} mm/pixel."
25
+ ],
26
+ "abdomen": [
27
+ "Fetal ultrasound at {weeks} weeks and {day} days gestation, focusing on the abdominal area, highlighting structural development with a pixel spacing of {pixel_spacing} mm/pixel.",
28
+ "Ultrasound image at {weeks} weeks and {day} days gestation, focusing on the fetal abdomen, with pixel spacing of {pixel_spacing} mm/pixel, highlighting the structural development in this stage of gestation.",
29
+ "Ultrasound image of the fetal abdomen at {weeks} weeks and {day} days gestational age, highlighting anatomical structures with a pixel spacing of {pixel_spacing} mm/pixel.",
30
+ "Ultrasound image of the fetal abdomen at {weeks} weeks and {day} days gestational age, highlighting the development of abdominal structures, with a pixel spacing of {pixel_spacing} mm/pixel.",
31
+ "Fetal ultrasound image at {weeks} weeks and {day} days gestational age, focusing on the abdomen with a pixel spacing of {pixel_spacing} mm/pixel."
32
+ ],
33
+ "femur": [
34
+ "Ultrasound image at {weeks} weeks and {day} days gestation, focusing on the developing fetal femur, with a pixel spacing of {pixel_spacing} mm/pixel, highlighting bone length and structure.",
35
+ "The ultrasound image highlights the fetal femur at {weeks} weeks and {day} days of gestation, with a pixel spacing of {pixel_spacing} mm/pixel, providing a detailed view of the developing bone.",
36
+ "Ultrasound image at {weeks} weeks and {day} days gestation, focusing on the fetal femur, highlighting skeletal development at a pixel spacing of {pixel_spacing} mm/pixel.",
37
+ "Fetal ultrasound image at {weeks} weeks and {day} days gestation, highlighting the femur with a pixel spacing of {pixel_spacing} mm/pixel, providing a detailed view of bone development.",
38
+ "Ultrasound image at {weeks} weeks and {day} days gestation, highlighting the fetal femur with a pixel spacing of {pixel_spacing} mm/pixel."
39
+ ]
40
+ }
41
 
42
  LIST_GA_IN_DAYS = [weeks * 7 + days for weeks in range(14, 39) for days in range(0, 7)]
43
 
 
198
  tmp = sorted(tmp, key=lambda x: x[0])
199
  return tmp[n // 2][0]
200
 
201
+ def _get_biometry_from_ga(self, ga_days: int, biometry_type: str, percentile: str = '0.5') -> float:
202
+ """
203
+ Calculate expected fetal biometry from gestational age using WHO coefficients.
204
+
205
+ Formula: measurement = exp(b0 + b1*GA + b2*GA² + b3*GA³ + b4*GA⁴)
206
+ where GA is in weeks.
207
+
208
+ Args:
209
+ ga_days: Gestational age in days
210
+ biometry_type: 'HC', 'AC', or 'FL'
211
+ percentile: '0.025', '0.5', or '0.975'
212
+
213
+ Returns:
214
+ Expected measurement in mm
215
+ """
216
+ ga_weeks = ga_days / 7
217
+
218
+ # WHO Fetal Growth Coefficients (from coefficientsGlobalV3.csv)
219
+ WHO_COEFFICIENTS = {
220
+ # Head Circumference (mm)
221
+ 'HC': {
222
+ '0.025': [1.59317517131532e+0, 2.9459800552433e-1, -7.3860372566707e-3, 6.56951770216148e-5, 0e+0],
223
+ '0.5': [2.09924879247164e+0, 2.53373656106037e-1, -6.05647816678282e-3, 5.14256072059917e-5, 0e+0],
224
+ '0.975': [2.50074069629423e+0, 2.20067854715719e-1, -4.93623111462443e-3, 3.89066000946519e-5, 0e+0],
225
+ },
226
+ # Abdominal Circumference (mm)
227
+ 'AC': {
228
+ '0.025': [1.19202778944614e+0, 3.14756681991964e-1, -8.01581308902169e-3, 7.51395976546808e-5, 0e+0],
229
+ '0.5': [1.58552931028045e+0, 2.89936781915424e-1, -7.32651929135797e-3, 6.9261631643994e-5, 0e+0],
230
+ '0.975': [2.03674472691951e+0, 2.57138461817474e-1, -6.34918788914223e-3, 6.0053745113196e-5, 0e+0],
231
+ },
232
+ # Femur Length (mm) - uses all 5 coefficients
233
+ 'FL': {
234
+ '0.025': [-7.27187176976836e+0, 1.28298928826162e+0, -5.80601892487905e-2, 1.21314319801879e-3, -9.60171505470123e-6],
235
+ '0.5': [-5.54922620776446e+0, 1.09559990166124e+0, -5.01310925949098e-2, 1.0678072569586e-3, -8.63970606288493e-6],
236
+ '0.975': [-3.64483930811801e+0, 8.57028131514986e-1, -3.84005685481303e-2, 8.12062784461527e-4, -6.55932416998498e-6],
237
+ },
238
  }
239
+
240
+ if biometry_type not in WHO_COEFFICIENTS:
241
+ raise ValueError(f"Unknown biometry type: {biometry_type}")
242
+ if percentile not in WHO_COEFFICIENTS[biometry_type]:
243
+ raise ValueError(f"Unknown percentile: {percentile}")
244
+
245
+ b0, b1, b2, b3, b4 = WHO_COEFFICIENTS[biometry_type][percentile]
246
+ return np.exp(b0 + b1*ga_weeks + b2*ga_weeks**2 + b3*ga_weeks**3 + b4*ga_weeks**4)
247
 
248
+ def estimate_gestational_age(self, image: Image.Image, pixel_size: float, view: str = "brain") -> Dict:
249
+ """Estimate gestational age from preprocessed fetal ultrasound."""
250
  if self.model is None:
251
  raise RuntimeError("Model not loaded. Call load_model() first.")
252
 
 
261
  image_features = self.model.encode_image(img_tensor)
262
  image_features /= image_features.norm(dim=-1, keepdim=True)
263
 
264
+ # Get text features for all prompts for the specified view
265
+ view_prompts = GA_TEXT_PROMPTS.get(view, GA_TEXT_PROMPTS["brain"])
266
  text_features_list = [
267
  self._get_ga_text_features(template, pixel_spacing)
268
+ for template in view_prompts
269
  ]
270
 
271
  text_dot_prods = self._get_unnormalized_dot_products(image_features, text_features_list)
 
278
  pred_weeks = pred_day // 7
279
  pred_days = pred_day % 7
280
 
281
+ # Map view to biometry type
282
+ VIEW_TO_BIOMETRY = {
283
+ "brain": "HC",
284
+ "abdomen": "AC",
285
+ "femur": "FL"
286
+ }
287
+ biometry_type = VIEW_TO_BIOMETRY.get(view, "HC")
288
+
289
+ # Compute view-specific biometry percentiles using WHO formulas
290
+ q025 = self._get_biometry_from_ga(pred_day, biometry_type, '0.025')
291
+ q500 = self._get_biometry_from_ga(pred_day, biometry_type, '0.5')
292
+ q975 = self._get_biometry_from_ga(pred_day, biometry_type, '0.975')
293
+
294
+ # Biometry labels for response
295
+ BIOMETRY_LABELS = {
296
+ "HC": "head_circumference",
297
+ "AC": "abdominal_circumference",
298
+ "FL": "femur_length"
299
+ }
300
+ biometry_key = BIOMETRY_LABELS.get(biometry_type, "head_circumference")
301
+
302
+ # Biometry units
303
+ BIOMETRY_UNITS = {
304
+ "HC": "mm",
305
+ "AC": "mm",
306
+ "FL": "mm"
307
+ }
308
 
309
  return {
310
  "gestational_age": {
 
312
  "days": pred_days,
313
  "total_days": pred_day
314
  },
315
+ "view": view,
316
+ biometry_key: {
317
  "p2_5": round(q025, 2),
318
  "p50": round(q500, 2),
319
  "p97_5": round(q975, 2)
320
  }
321
  }
322
 
323
+ def estimate_ga_from_file(self, file_bytes: bytes, filename: str, pixel_size: float, view: str = "brain") -> Tuple[Dict, Dict]:
324
  """
325
  Estimate GA from raw file bytes with automatic preprocessing.
326
 
 
334
  if preprocessing_info["type"] == "dicom":
335
  pixel_size = preprocessing_info["metadata"].get("pixel_spacing", pixel_size)
336
 
337
+ # Estimate GA with the specified view
338
+ ga_results = self.estimate_gestational_age(processed_image, pixel_size, view)
339
 
340
  return ga_results, preprocessing_info
341
 
backend/app/services/preprocessing.py CHANGED
@@ -336,11 +336,9 @@ def preprocess_dicom(file_bytes: bytes) -> Tuple[Image.Image, Dict]:
336
  else:
337
  median = np.clip(dst * 255, 0, 255).astype(np.uint8)
338
 
339
- # Pad to square
340
  img = pad_to_square(median)
341
-
342
- # Resize
343
- img = cv2.resize(img, TARGET_SIZE, interpolation=INTERPOLATION)
344
 
345
  # Extract metadata
346
  try:
@@ -360,8 +358,9 @@ def preprocess_dicom(file_bytes: bytes) -> Tuple[Image.Image, Dict]:
360
  'original_size': (rows, columns),
361
  'original_pixel_spacing': orig_pixel_spacing,
362
  'fan_size': (fan.shape[0], fan.shape[1]),
363
- 'pixel_spacing': orig_pixel_spacing[0] if orig_pixel_spacing else 1.0,
364
- 'processed_size': TARGET_SIZE,
 
365
  }
366
 
367
  # Convert to PIL
@@ -381,7 +380,6 @@ def preprocess_dicom(file_bytes: bytes) -> Tuple[Image.Image, Dict]:
381
  "inpainting",
382
  "denoising" if SKIMAGE_AVAILABLE else "normalization",
383
  "square_padding",
384
- "resize_512"
385
  ]
386
 
387
  return img_pil, {
 
336
  else:
337
  median = np.clip(dst * 255, 0, 255).astype(np.uint8)
338
 
339
+ # Pad to square (model will resize to 224×224)
340
  img = pad_to_square(median)
341
+ padded_size = max(img.shape[:2])
 
 
342
 
343
  # Extract metadata
344
  try:
 
358
  'original_size': (rows, columns),
359
  'original_pixel_spacing': orig_pixel_spacing,
360
  'fan_size': (fan.shape[0], fan.shape[1]),
361
+ 'padded_size': padded_size,
362
+ 'pixel_spacing': orig_pixel_spacing[0], # Original spacing, model.py adjusts for resize
363
+ 'processed_size': (padded_size, padded_size),
364
  }
365
 
366
  # Convert to PIL
 
380
  "inpainting",
381
  "denoising" if SKIMAGE_AVAILABLE else "normalization",
382
  "square_padding",
 
383
  ]
384
 
385
  return img_pil, {
frontend/src/App.tsx CHANGED
@@ -5,6 +5,7 @@ import { Tabs } from './components/Tabs';
5
  import { ClassificationPage } from './pages/ClassificationPage';
6
  import { GestationalAgePage } from './pages/GestationalAgePage';
7
  import { HelpPage } from './pages/HelpPage';
 
8
  import { checkHealth, getFeedbackStats } from './lib/api';
9
 
10
  const tabs = [
@@ -52,46 +53,49 @@ function App() {
52
  }, [loadFeedbackStats]);
53
 
54
  return (
55
- <div className="h-screen flex flex-col bg-dark-bg overflow-hidden">
56
- {/* Header - fixed height */}
57
- <Header isConnected={isConnected} feedbackStats={feedbackStats} />
 
58
 
59
- {/* Tabs - fixed height */}
60
- <Tabs tabs={tabs} activeTab={activeTab} onChange={setActiveTab} />
61
 
62
- {/* Main content - fills remaining space */}
63
- <main className="flex-1 flex min-h-0 overflow-hidden">
64
- {activeTab === 'classification' && <ClassificationPage onFeedbackUpdate={loadFeedbackStats} />}
65
- {activeTab === 'gestational-age' && <GestationalAgePage />}
66
- {activeTab === 'help' && <HelpPage />}
67
- </main>
68
 
69
- {/* Footer - fixed height, always visible */}
70
- <footer className="flex-shrink-0 px-6 py-3 border-t border-dark-border bg-white">
71
- <div className="flex items-center justify-between text-xs">
72
- <span className="text-text-secondary">FetalCLIP • Foundation Model for Fetal Ultrasound Analysis</span>
73
- <div className="flex items-center gap-4">
74
- <a
75
- href="https://huggingface.co/numansaeed/fetalclip-model"
76
- target="_blank"
77
- rel="noopener noreferrer"
78
- className="text-accent-blue hover:text-accent-blue-hover transition-colors font-medium"
79
- >
80
- 🤗 Model Hub
81
- </a>
82
- <a
83
- href="https://arxiv.org/abs/2502.14807"
84
- target="_blank"
85
- rel="noopener noreferrer"
86
- className="text-accent-blue hover:text-accent-blue-hover transition-colors font-medium"
87
- >
88
- 📄 Paper
89
- </a>
 
90
  </div>
91
- </div>
92
- </footer>
93
- </div>
94
  );
95
  }
96
 
97
  export default App;
 
 
5
  import { ClassificationPage } from './pages/ClassificationPage';
6
  import { GestationalAgePage } from './pages/GestationalAgePage';
7
  import { HelpPage } from './pages/HelpPage';
8
+ import { ImageProvider } from './lib/ImageContext';
9
  import { checkHealth, getFeedbackStats } from './lib/api';
10
 
11
  const tabs = [
 
53
  }, [loadFeedbackStats]);
54
 
55
  return (
56
+ <ImageProvider>
57
+ <div className="h-screen flex flex-col bg-dark-bg overflow-hidden">
58
+ {/* Header - fixed height */}
59
+ <Header isConnected={isConnected} feedbackStats={feedbackStats} />
60
 
61
+ {/* Tabs - fixed height */}
62
+ <Tabs tabs={tabs} activeTab={activeTab} onChange={setActiveTab} />
63
 
64
+ {/* Main content - fills remaining space */}
65
+ <main className="flex-1 flex min-h-0 overflow-hidden">
66
+ {activeTab === 'classification' && <ClassificationPage onFeedbackUpdate={loadFeedbackStats} />}
67
+ {activeTab === 'gestational-age' && <GestationalAgePage />}
68
+ {activeTab === 'help' && <HelpPage />}
69
+ </main>
70
 
71
+ {/* Footer - fixed height, always visible */}
72
+ <footer className="flex-shrink-0 px-6 py-3 border-t border-dark-border bg-white">
73
+ <div className="flex items-center justify-between text-xs">
74
+ <span className="text-text-secondary">FetalCLIP • Foundation Model for Fetal Ultrasound Analysis</span>
75
+ <div className="flex items-center gap-4">
76
+ <a
77
+ href="https://huggingface.co/numansaeed/fetalclip-model"
78
+ target="_blank"
79
+ rel="noopener noreferrer"
80
+ className="text-accent-blue hover:text-accent-blue-hover transition-colors font-medium"
81
+ >
82
+ 🤗 Model Hub
83
+ </a>
84
+ <a
85
+ href="https://arxiv.org/abs/2502.14807"
86
+ target="_blank"
87
+ rel="noopener noreferrer"
88
+ className="text-accent-blue hover:text-accent-blue-hover transition-colors font-medium"
89
+ >
90
+ 📄 Paper
91
+ </a>
92
+ </div>
93
  </div>
94
+ </footer>
95
+ </div>
96
+ </ImageProvider>
97
  );
98
  }
99
 
100
  export default App;
101
+
frontend/src/components/FeedbackSection.tsx CHANGED
@@ -12,6 +12,7 @@ interface FeedbackSectionProps {
12
  imageHash?: string;
13
  preprocessedImageBase64?: string;
14
  onFeedbackSubmitted?: () => void;
 
15
  disabled?: boolean;
16
  }
17
 
@@ -25,6 +26,7 @@ export function FeedbackSection({
25
  imageHash,
26
  preprocessedImageBase64,
27
  onFeedbackSubmitted,
 
28
  disabled = false,
29
  }: FeedbackSectionProps) {
30
  const [feedbackState, setFeedbackState] = useState<'none' | 'correct' | 'incorrect' | 'not_sure'>('none');
@@ -85,6 +87,7 @@ export function FeedbackSection({
85
  const handleSubmitCorrection = async () => {
86
  if (!correctLabel) return;
87
  await submitFeedbackData(false, correctLabel, notes);
 
88
  };
89
 
90
  const handleSubmitNotSure = async () => {
@@ -207,8 +210,8 @@ export function FeedbackSection({
207
  onClick={handleSubmitCorrection}
208
  disabled={!correctLabel || isSubmitting}
209
  className={`flex-1 flex items-center justify-center gap-2 px-4 py-2 rounded-lg text-sm font-medium transition-colors ${!correctLabel || isSubmitting
210
- ? 'bg-gray-200 text-gray-400 cursor-not-allowed'
211
- : 'bg-green-600 hover:bg-green-700 text-white'
212
  }`}
213
  >
214
  {isSubmitting ? (
 
12
  imageHash?: string;
13
  preprocessedImageBase64?: string;
14
  onFeedbackSubmitted?: () => void;
15
+ onViewCorrected?: (correctedLabel: string) => void;
16
  disabled?: boolean;
17
  }
18
 
 
26
  imageHash,
27
  preprocessedImageBase64,
28
  onFeedbackSubmitted,
29
+ onViewCorrected,
30
  disabled = false,
31
  }: FeedbackSectionProps) {
32
  const [feedbackState, setFeedbackState] = useState<'none' | 'correct' | 'incorrect' | 'not_sure'>('none');
 
87
  const handleSubmitCorrection = async () => {
88
  if (!correctLabel) return;
89
  await submitFeedbackData(false, correctLabel, notes);
90
+ onViewCorrected?.(correctLabel);
91
  };
92
 
93
  const handleSubmitNotSure = async () => {
 
210
  onClick={handleSubmitCorrection}
211
  disabled={!correctLabel || isSubmitting}
212
  className={`flex-1 flex items-center justify-center gap-2 px-4 py-2 rounded-lg text-sm font-medium transition-colors ${!correctLabel || isSubmitting
213
+ ? 'bg-gray-200 text-gray-400 cursor-not-allowed'
214
+ : 'bg-green-600 hover:bg-green-700 text-white'
215
  }`}
216
  >
217
  {isSubmitting ? (
frontend/src/lib/ImageContext.tsx ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { createContext, useContext, useState, ReactNode, useCallback } from 'react';
2
+ import { ClassificationResult } from './api';
3
+
4
+ export interface ImageState {
5
+ // Current image
6
+ file: File | null;
7
+ preview: string | null;
8
+ processedImage: string | null;
9
+
10
+ // Classification results
11
+ classificationResults: ClassificationResult[] | null;
12
+
13
+ // View (corrected takes priority)
14
+ predictedView: string | null;
15
+ correctedView: string | null;
16
+
17
+ // Session ID for feedback
18
+ sessionId: string;
19
+ }
20
+
21
+ interface ImageContextType extends ImageState {
22
+ // Actions
23
+ setFile: (file: File | null, preview: string | null) => void;
24
+ setClassificationResults: (results: ClassificationResult[] | null, processedImage?: string | null) => void;
25
+ setCorrectedView: (view: string | null) => void;
26
+ resetState: () => void;
27
+
28
+ // Computed
29
+ currentView: string | null; // Returns correctedView if set, otherwise topPrediction
30
+ isViewGAEligible: boolean;
31
+ }
32
+
33
+ // GA-eligible views (match keys from prompt_fetal_view.json)
34
+ export const GA_ELIGIBLE_VIEWS = [
35
+ "brain", // Head Circumference
36
+ "abdomen", // Abdominal Circumference
37
+ "femur", // Femur Length
38
+ ];
39
+
40
+ // Display names for GA biometry types
41
+ export const GA_BIOMETRY_LABELS: Record<string, string> = {
42
+ "brain": "Head Circumference (HC)",
43
+ "abdomen": "Abdominal Circumference (AC)",
44
+ "femur": "Femur Length (FL)",
45
+ };
46
+
47
+ const generateSessionId = () => {
48
+ return Math.random().toString(36).substring(2, 10);
49
+ };
50
+
51
+ const initialState: ImageState = {
52
+ file: null,
53
+ preview: null,
54
+ processedImage: null,
55
+ classificationResults: null,
56
+ predictedView: null,
57
+ correctedView: null,
58
+ sessionId: generateSessionId(),
59
+ };
60
+
61
+ const ImageContext = createContext<ImageContextType | null>(null);
62
+
63
+ export function ImageProvider({ children }: { children: ReactNode }) {
64
+ const [state, setState] = useState<ImageState>(initialState);
65
+
66
+ const setFile = useCallback((file: File | null, preview: string | null) => {
67
+ setState(prev => ({
68
+ ...prev,
69
+ file,
70
+ preview,
71
+ processedImage: null,
72
+ classificationResults: null,
73
+ predictedView: null,
74
+ correctedView: null,
75
+ }));
76
+ }, []);
77
+
78
+ const setClassificationResults = useCallback((
79
+ results: ClassificationResult[] | null,
80
+ processedImage: string | null = null
81
+ ) => {
82
+ setState(prev => ({
83
+ ...prev,
84
+ classificationResults: results,
85
+ processedImage: processedImage || prev.processedImage,
86
+ predictedView: results && results.length > 0 ? results[0].label : null,
87
+ }));
88
+ }, []);
89
+
90
+ const setCorrectedView = useCallback((view: string | null) => {
91
+ setState(prev => ({
92
+ ...prev,
93
+ correctedView: view,
94
+ }));
95
+ }, []);
96
+
97
+ const resetState = useCallback(() => {
98
+ setState({
99
+ ...initialState,
100
+ sessionId: generateSessionId(),
101
+ });
102
+ }, []);
103
+
104
+ // Computed: current view (corrected takes priority)
105
+ const currentView = state.correctedView || state.predictedView;
106
+
107
+ // Computed: is view eligible for GA estimation
108
+ const isViewGAEligible = currentView ? GA_ELIGIBLE_VIEWS.includes(currentView) : false;
109
+
110
+ return (
111
+ <ImageContext.Provider
112
+ value={{
113
+ ...state,
114
+ setFile,
115
+ setClassificationResults,
116
+ setCorrectedView,
117
+ resetState,
118
+ currentView,
119
+ isViewGAEligible,
120
+ }}
121
+ >
122
+ {children}
123
+ </ImageContext.Provider>
124
+ );
125
+ }
126
+
127
+ export function useImageContext() {
128
+ const context = useContext(ImageContext);
129
+ if (!context) {
130
+ throw new Error('useImageContext must be used within an ImageProvider');
131
+ }
132
+ return context;
133
+ }
frontend/src/lib/api.ts CHANGED
@@ -58,11 +58,16 @@ export async function classifyImage(file: File, topK: number = 5): Promise<Class
58
  return response.json();
59
  }
60
 
61
- export async function estimateGestationalAge(file: File, pixelSize: number): Promise<GestationalAgeResponse> {
62
  const formData = new FormData();
63
  formData.append('file', file);
64
 
65
- const response = await fetch(`${API_BASE}/api/v1/gestational-age/?pixel_size=${pixelSize}`, {
 
 
 
 
 
66
  method: 'POST',
67
  body: formData,
68
  });
@@ -286,19 +291,19 @@ export async function deleteFeedback(feedbackId: string): Promise<void> {
286
  }
287
  }
288
 
289
- // List of all fetal view labels
290
  export const FETAL_VIEW_LABELS = [
291
- "Fetal abdomen",
292
- "Fetal brain (Transventricular)",
293
- "Fetal brain (Transcerebellar)",
294
- "Fetal femur",
295
- "Fetal thorax (4-chamber view)",
296
- "Fetal face (Lips)",
297
- "Fetal brain (Transthalamic)",
298
- "Other",
299
- "Maternal anatomy",
300
- "Fetal face (Profile)",
301
- "Fetal thorax (3-vessel view)",
302
- "Fetal thorax (LVOT)",
303
- "Fetal thorax (RVOT)"
304
  ];
 
58
  return response.json();
59
  }
60
 
61
+ export async function estimateGestationalAge(file: File, pixelSize: number, view?: string): Promise<GestationalAgeResponse> {
62
  const formData = new FormData();
63
  formData.append('file', file);
64
 
65
+ let url = `${API_BASE}/api/v1/gestational-age/?pixel_size=${pixelSize}`;
66
+ if (view) {
67
+ url += `&view=${encodeURIComponent(view)}`;
68
+ }
69
+
70
+ const response = await fetch(url, {
71
  method: 'POST',
72
  body: formData,
73
  });
 
291
  }
292
  }
293
 
294
+ // List of all fetal view labels (must match keys in prompt_fetal_view.json)
295
  export const FETAL_VIEW_LABELS = [
296
+ "abdomen",
297
+ "brain",
298
+ "femur",
299
+ "heart",
300
+ "kidney",
301
+ "lips_nose",
302
+ "profile_patient",
303
+ "spine",
304
+ "cervix",
305
+ "cord",
306
+ "diaphragm",
307
+ "feet",
308
+ "orbit"
309
  ];
frontend/src/pages/ClassificationPage.tsx CHANGED
@@ -1,13 +1,12 @@
1
  import { useState, useCallback, useEffect } from 'react';
2
- import { Search, ChevronLeft, ChevronRight, FolderOpen } from 'lucide-react';
3
  import { Panel } from '../components/Panel';
4
  import { FileUpload } from '../components/FileUpload';
5
- import { Button } from '../components/Button';
6
- import { Slider } from '../components/Slider';
7
  import { ResultsCard } from '../components/ResultsCard';
8
  import { PreprocessingBadge } from '../components/PreprocessingBadge';
9
  import { FeedbackSection } from '../components/FeedbackSection';
10
  import { SessionHistory } from '../components/SessionHistory';
 
11
  import {
12
  classifyImage,
13
  getFilePreview,
@@ -24,6 +23,8 @@ interface ClassificationPageProps {
24
  }
25
 
26
  export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps) {
 
 
27
  // Session state
28
  const [sessionId, setSessionId] = useState<string>('');
29
  const [feedbackRefresh, setFeedbackRefresh] = useState(0);
@@ -33,7 +34,7 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
33
  const [preview, setPreview] = useState<string | null>(null);
34
  const [isLoadingPreview, setIsLoadingPreview] = useState(false);
35
 
36
- // Multiple files state (folder)
37
  const [files, setFiles] = useState<File[]>([]);
38
  const [currentIndex, setCurrentIndex] = useState(0);
39
 
@@ -48,7 +49,7 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
48
  // Image view tab
49
  const [imageTab, setImageTab] = useState<'input' | 'processed'>('input');
50
 
51
- // Initialize session on mount
52
  useEffect(() => {
53
  const initSession = async () => {
54
  try {
@@ -62,13 +63,11 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
62
  }, []);
63
 
64
  const loadPreview = useCallback(async (selectedFile: File) => {
65
- // For regular images, create local preview
66
  if (!isDicomFile(selectedFile.name)) {
67
  setPreview(URL.createObjectURL(selectedFile));
68
  return;
69
  }
70
 
71
- // For DICOM, fetch preview from backend
72
  setIsLoadingPreview(true);
73
  try {
74
  const response = await getFilePreview(selectedFile);
@@ -99,10 +98,8 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
99
  const fileList = e.target.files;
100
  if (!fileList) return;
101
 
102
- // Accept both images and DICOM files
103
  const validFiles = Array.from(fileList).filter(f =>
104
- f.type.startsWith('image/') ||
105
- isDicomFile(f.name)
106
  ).sort((a, b) => a.name.localeCompare(b.name));
107
 
108
  if (validFiles.length > 0) {
@@ -150,13 +147,18 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
150
  setResults(response.predictions);
151
  setPreprocessingInfo(response.preprocessing);
152
 
153
- // Use the processed image from backend
154
- if (response.preprocessing.processed_image_base64) {
155
- setProcessedImage(`data:image/png;base64,${response.preprocessing.processed_image_base64}`);
 
 
 
156
  }
157
  setImageTab('processed');
158
 
159
- // Record image analyzed for session stats
 
 
160
  if (sessionId) {
161
  await recordImageAnalyzed(sessionId);
162
  }
@@ -175,20 +177,22 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
175
  };
176
 
177
  const fileType = file ? getFileType(file.name) : null;
 
178
 
179
  return (
180
  <div className="flex flex-1 min-h-0 overflow-hidden">
181
- {/* Left Panel - Image */}
182
- <div className="w-1/2 border-r border-dark-border bg-white flex flex-col min-h-0">
183
- <Panel title="Image" className="flex-1 flex flex-col min-h-0">
184
- <div className="flex flex-col h-full gap-3 overflow-hidden">
185
- {/* Image Tabs */}
186
- <div className="flex-shrink-0 flex gap-1 bg-dark-input p-1 rounded-xl">
 
187
  <button
188
  onClick={() => setImageTab('input')}
189
- className={`flex-1 px-4 py-2 text-xs font-semibold rounded-lg transition-all ${imageTab === 'input'
190
- ? 'bg-nvidia-green text-white shadow-md'
191
- : 'text-text-secondary hover:text-text-primary hover:bg-white'
192
  }`}
193
  >
194
  Input
@@ -196,144 +200,160 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
196
  <button
197
  onClick={() => setImageTab('processed')}
198
  disabled={!processedImage}
199
- className={`flex-1 px-4 py-2 text-xs font-semibold rounded-lg transition-all ${imageTab === 'processed'
200
- ? 'bg-nvidia-green text-white shadow-md'
201
- : 'text-text-secondary hover:text-text-primary hover:bg-white disabled:opacity-40 disabled:cursor-not-allowed disabled:hover:bg-transparent'
202
  }`}
203
  >
204
  Processed
205
  </button>
206
  </div>
207
 
208
- {/* Image Display */}
209
- <div className="flex-1 min-h-0 overflow-hidden">
210
- {imageTab === 'input' ? (
211
- <FileUpload
212
- onUpload={handleSingleUpload}
213
- preview={preview}
214
- currentFile={file}
215
- isLoading={isLoadingPreview}
216
- />
217
- ) : (
218
- <div className="h-full w-full bg-slate-900 border border-dark-border rounded-xl overflow-hidden flex items-center justify-center">
219
- {processedImage ? (
220
- <img
221
- src={processedImage}
222
- alt="Processed"
223
- className="max-w-full max-h-full w-full h-full object-contain"
224
- />
225
- ) : (
226
- <p className="text-white/60 text-sm">Run classification to see processed image</p>
227
- )}
228
- </div>
229
- )}
230
- </div>
231
 
232
- {/* Controls Section */}
233
- <div className="flex-shrink-0 space-y-3">
234
- {/* Folder Upload */}
235
- <label className="flex items-center gap-2 px-4 py-2.5 bg-dark-input border border-dark-border rounded-xl cursor-pointer hover:border-nvidia-green/50 hover:bg-nvidia-green/5 transition-all">
236
- <FolderOpen className="w-4 h-4 text-text-secondary" />
237
- <span className="text-sm text-text-secondary font-medium">Load folder...</span>
238
- <input
239
- type="file"
240
- webkitdirectory=""
241
- directory=""
242
- multiple
243
- className="hidden"
244
- onChange={handleFolderUpload}
245
- />
246
- </label>
247
-
248
- {/* Navigation */}
249
- {files.length > 1 && (
250
- <div className="flex items-center justify-between bg-dark-input rounded-xl px-4 py-2.5 border border-dark-border">
251
- <Button
252
- variant="secondary"
253
- onClick={() => navigateImage('prev')}
254
- disabled={currentIndex === 0}
255
- className="!px-2 !py-1 !shadow-none"
256
- >
257
- <ChevronLeft className="w-4 h-4" />
258
- </Button>
259
- <span className="text-sm text-text-secondary font-medium">
260
- {currentIndex + 1} / {files.length}
261
- </span>
262
- <Button
263
- variant="secondary"
264
- onClick={() => navigateImage('next')}
265
- disabled={currentIndex === files.length - 1}
266
- className="!px-2 !py-1 !shadow-none"
267
- >
268
- <ChevronRight className="w-4 h-4" />
269
- </Button>
270
- </div>
271
- )}
272
 
273
- {/* File name */}
274
- {file && (
275
- <div className="text-xs text-text-muted truncate px-1 font-medium">
276
- {file.name}
277
- </div>
278
- )}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
 
280
- {/* Preprocessing Badge */}
281
- {(fileType || preprocessingInfo) && (
282
- <PreprocessingBadge
283
- info={preprocessingInfo}
284
- fileType={fileType}
285
- compact
286
- />
287
- )}
288
 
289
- {/* Settings */}
290
- <Slider
291
- label="Top Predictions"
 
292
  value={topK}
293
- onChange={setTopK}
294
- min={1}
295
- max={13}
296
- info="Number of predictions to display"
297
- />
298
-
299
- {/* Classify Button */}
300
- <Button
301
- onClick={handleClassify}
302
- disabled={!file}
303
- isLoading={isLoading}
304
- icon={<Search className="w-4 h-4" />}
305
  >
306
- Classify View
307
- </Button>
 
 
 
308
 
309
- {error && (
310
- <div className="p-3 bg-red-50 border border-red-200 rounded-xl">
311
- <p className="text-red-600 text-xs font-medium">{error}</p>
312
- </div>
 
 
 
 
 
 
 
 
 
 
 
313
  )}
314
- </div>
 
315
  </div>
316
- </Panel>
 
 
 
 
 
 
 
 
317
  </div>
318
 
319
- {/* Right Panel - Results */}
320
- <div className="w-1/2 bg-dark-bg flex flex-col min-h-0">
321
- <Panel
322
- title="Results"
323
- action={
324
- preprocessingInfo && (
325
- <span className={`text-xs px-2.5 py-1 rounded-full font-semibold ${preprocessingInfo.pipeline === 'full'
326
- ? 'bg-nvidia-green/10 text-nvidia-green'
327
- : 'bg-amber-500/10 text-amber-600'
328
- }`}>
329
- {preprocessingInfo.pipeline === 'full' ? 'Full Pipeline' : 'Basic Pipeline'}
330
- </span>
331
- )
332
- }
333
- className="flex-1 flex flex-col min-h-0"
334
- >
335
- <div className="flex-1 overflow-y-auto space-y-4 pb-6">
336
- <ResultsCard results={results} isLoading={isLoading} />
337
 
338
  {/* Feedback Section */}
339
  {results && results.length > 0 && file && (
@@ -345,6 +365,7 @@ export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps
345
  topPrediction={results[0]}
346
  preprocessedImageBase64={processedImage ? processedImage.split(',')[1] : undefined}
347
  onFeedbackSubmitted={handleFeedbackSubmitted}
 
348
  />
349
  )}
350
 
 
1
  import { useState, useCallback, useEffect } from 'react';
2
+ import { Search, ChevronLeft, ChevronRight, FolderOpen, Upload, AlertTriangle } from 'lucide-react';
3
  import { Panel } from '../components/Panel';
4
  import { FileUpload } from '../components/FileUpload';
 
 
5
  import { ResultsCard } from '../components/ResultsCard';
6
  import { PreprocessingBadge } from '../components/PreprocessingBadge';
7
  import { FeedbackSection } from '../components/FeedbackSection';
8
  import { SessionHistory } from '../components/SessionHistory';
9
+ import { useImageContext } from '../lib/ImageContext';
10
  import {
11
  classifyImage,
12
  getFilePreview,
 
23
  }
24
 
25
  export function ClassificationPage({ onFeedbackUpdate }: ClassificationPageProps) {
26
+ const imageContext = useImageContext();
27
+
28
  // Session state
29
  const [sessionId, setSessionId] = useState<string>('');
30
  const [feedbackRefresh, setFeedbackRefresh] = useState(0);
 
34
  const [preview, setPreview] = useState<string | null>(null);
35
  const [isLoadingPreview, setIsLoadingPreview] = useState(false);
36
 
37
+ // Multiple files state
38
  const [files, setFiles] = useState<File[]>([]);
39
  const [currentIndex, setCurrentIndex] = useState(0);
40
 
 
49
  // Image view tab
50
  const [imageTab, setImageTab] = useState<'input' | 'processed'>('input');
51
 
52
+ // Initialize session
53
  useEffect(() => {
54
  const initSession = async () => {
55
  try {
 
63
  }, []);
64
 
65
  const loadPreview = useCallback(async (selectedFile: File) => {
 
66
  if (!isDicomFile(selectedFile.name)) {
67
  setPreview(URL.createObjectURL(selectedFile));
68
  return;
69
  }
70
 
 
71
  setIsLoadingPreview(true);
72
  try {
73
  const response = await getFilePreview(selectedFile);
 
98
  const fileList = e.target.files;
99
  if (!fileList) return;
100
 
 
101
  const validFiles = Array.from(fileList).filter(f =>
102
+ f.type.startsWith('image/') || isDicomFile(f.name)
 
103
  ).sort((a, b) => a.name.localeCompare(b.name));
104
 
105
  if (validFiles.length > 0) {
 
147
  setResults(response.predictions);
148
  setPreprocessingInfo(response.preprocessing);
149
 
150
+ const processedImageData = response.preprocessing.processed_image_base64
151
+ ? `data:image/png;base64,${response.preprocessing.processed_image_base64}`
152
+ : null;
153
+
154
+ if (processedImageData) {
155
+ setProcessedImage(processedImageData);
156
  }
157
  setImageTab('processed');
158
 
159
+ imageContext.setFile(file, preview);
160
+ imageContext.setClassificationResults(response.predictions, processedImageData);
161
+
162
  if (sessionId) {
163
  await recordImageAnalyzed(sessionId);
164
  }
 
177
  };
178
 
179
  const fileType = file ? getFileType(file.name) : null;
180
+ const displayImage = imageTab === 'processed' && processedImage ? processedImage : preview;
181
 
182
  return (
183
  <div className="flex flex-1 min-h-0 overflow-hidden">
184
+ {/* Left Panel - Image (60%) */}
185
+ <div className="w-3/5 border-r border-dark-border bg-slate-900 flex flex-col min-h-0">
186
+ {/* Compact Header */}
187
+ <div className="flex-shrink-0 px-4 py-2 bg-slate-800 border-b border-slate-700 flex items-center justify-between">
188
+ <div className="flex items-center gap-3">
189
+ {/* Image Tab Toggle */}
190
+ <div className="flex gap-1 bg-slate-700 p-0.5 rounded-lg">
191
  <button
192
  onClick={() => setImageTab('input')}
193
+ className={`px-3 py-1 text-xs font-medium rounded-md transition-all ${imageTab === 'input'
194
+ ? 'bg-nvidia-green text-white'
195
+ : 'text-slate-400 hover:text-white'
196
  }`}
197
  >
198
  Input
 
200
  <button
201
  onClick={() => setImageTab('processed')}
202
  disabled={!processedImage}
203
+ className={`px-3 py-1 text-xs font-medium rounded-md transition-all ${imageTab === 'processed'
204
+ ? 'bg-nvidia-green text-white'
205
+ : 'text-slate-400 hover:text-white disabled:opacity-40 disabled:cursor-not-allowed'
206
  }`}
207
  >
208
  Processed
209
  </button>
210
  </div>
211
 
212
+ {/* File info */}
213
+ {file && (
214
+ <span className="text-xs text-slate-400 truncate max-w-[150px]">
215
+ {file.name}
216
+ </span>
217
+ )}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
 
219
+ {/* DICOM badge */}
220
+ {fileType === 'dicom' && (
221
+ <span className="px-2 py-0.5 bg-nvidia-green/20 text-nvidia-green text-xs rounded-full font-medium">
222
+ DICOM
223
+ </span>
224
+ )}
225
+ </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
+ {/* Folder navigation */}
228
+ {files.length > 1 && (
229
+ <div className="flex items-center gap-2">
230
+ <button
231
+ onClick={() => navigateImage('prev')}
232
+ disabled={currentIndex === 0}
233
+ className="p-1 text-slate-400 hover:text-white disabled:opacity-40"
234
+ >
235
+ <ChevronLeft className="w-4 h-4" />
236
+ </button>
237
+ <span className="text-xs text-slate-400">
238
+ {currentIndex + 1}/{files.length}
239
+ </span>
240
+ <button
241
+ onClick={() => navigateImage('next')}
242
+ disabled={currentIndex === files.length - 1}
243
+ className="p-1 text-slate-400 hover:text-white disabled:opacity-40"
244
+ >
245
+ <ChevronRight className="w-4 h-4" />
246
+ </button>
247
+ </div>
248
+ )}
249
+ </div>
250
+
251
+ {/* Image Display - fills remaining space */}
252
+ <div className="flex-1 min-h-0 p-4">
253
+ {displayImage ? (
254
+ <img
255
+ src={displayImage}
256
+ alt="Ultrasound"
257
+ className="w-full h-full object-contain rounded-lg"
258
+ />
259
+ ) : (
260
+ <FileUpload
261
+ onUpload={handleSingleUpload}
262
+ preview={null}
263
+ currentFile={null}
264
+ isLoading={isLoadingPreview}
265
+ />
266
+ )}
267
+ </div>
268
+
269
+ {/* Compact Control Bar */}
270
+ <div className="flex-shrink-0 px-4 py-3 bg-slate-800 border-t border-slate-700">
271
+ <div className="flex items-center gap-3">
272
+ {/* Upload/Folder buttons */}
273
+ <label className="flex items-center gap-1.5 px-3 py-1.5 bg-slate-700 hover:bg-slate-600 rounded-lg cursor-pointer transition-colors">
274
+ <Upload className="w-3.5 h-3.5 text-slate-300" />
275
+ <span className="text-xs text-slate-300 font-medium">Upload</span>
276
+ <input
277
+ type="file"
278
+ accept="image/*,.dcm,.dicom"
279
+ className="hidden"
280
+ onChange={(e) => e.target.files?.[0] && handleSingleUpload(e.target.files[0])}
281
+ />
282
+ </label>
283
+
284
+ <label className="flex items-center gap-1.5 px-3 py-1.5 bg-slate-700 hover:bg-slate-600 rounded-lg cursor-pointer transition-colors">
285
+ <FolderOpen className="w-3.5 h-3.5 text-slate-300" />
286
+ <span className="text-xs text-slate-300 font-medium">Folder</span>
287
+ <input
288
+ type="file"
289
+ webkitdirectory=""
290
+ directory=""
291
+ multiple
292
+ className="hidden"
293
+ onChange={handleFolderUpload}
294
+ />
295
+ </label>
296
 
297
+ <div className="w-px h-6 bg-slate-600" />
 
 
 
 
 
 
 
298
 
299
+ {/* Top-K selector */}
300
+ <div className="flex items-center gap-1.5">
301
+ <span className="text-xs text-slate-400">Top</span>
302
+ <select
303
  value={topK}
304
+ onChange={(e) => setTopK(parseInt(e.target.value))}
305
+ className="px-2 py-1 bg-slate-700 border border-slate-600 rounded text-xs text-white"
 
 
 
 
 
 
 
 
 
 
306
  >
307
+ {[3, 5, 10, 13].map(k => (
308
+ <option key={k} value={k}>{k}</option>
309
+ ))}
310
+ </select>
311
+ </div>
312
 
313
+ <div className="flex-1" />
314
+
315
+ {/* Classify Button */}
316
+ <button
317
+ onClick={handleClassify}
318
+ disabled={!file || isLoading}
319
+ className={`flex items-center gap-2 px-4 py-1.5 rounded-lg text-sm font-semibold transition-all ${!file
320
+ ? 'bg-slate-600 text-slate-400 cursor-not-allowed'
321
+ : 'bg-nvidia-green text-white hover:bg-nvidia-green-hover shadow-lg'
322
+ }`}
323
+ >
324
+ {isLoading ? (
325
+ <div className="w-4 h-4 border-2 border-white/30 border-t-white rounded-full animate-spin" />
326
+ ) : (
327
+ <Search className="w-4 h-4" />
328
  )}
329
+ Classify
330
+ </button>
331
  </div>
332
+
333
+ {/* Error row */}
334
+ {error && (
335
+ <div className="mt-2 p-2 rounded-lg flex items-center gap-2 text-xs bg-red-500/10 text-red-400">
336
+ <AlertTriangle className="w-3.5 h-3.5 flex-shrink-0" />
337
+ {error}
338
+ </div>
339
+ )}
340
+ </div>
341
  </div>
342
 
343
+ {/* Right Panel - Results (40%) */}
344
+ <div className="w-2/5 bg-white flex flex-col min-h-0">
345
+ <Panel title="Results" className="flex-1 flex flex-col min-h-0">
346
+ <div className="flex-1 overflow-y-auto space-y-4">
347
+ {/* Preprocessing Badge */}
348
+ {(fileType || preprocessingInfo) && (
349
+ <PreprocessingBadge info={preprocessingInfo} fileType={fileType} />
350
+ )}
351
+
352
+ {/* Results Card */}
353
+ <ResultsCard
354
+ results={results}
355
+ isLoading={isLoading}
356
+ />
 
 
 
 
357
 
358
  {/* Feedback Section */}
359
  {results && results.length > 0 && file && (
 
365
  topPrediction={results[0]}
366
  preprocessedImageBase64={processedImage ? processedImage.split(',')[1] : undefined}
367
  onFeedbackSubmitted={handleFeedbackSubmitted}
368
+ onViewCorrected={(correctedLabel) => imageContext.setCorrectedView(correctedLabel)}
369
  />
370
  )}
371
 
frontend/src/pages/GestationalAgePage.tsx CHANGED
@@ -1,23 +1,37 @@
1
- import { useState, useCallback } from 'react';
2
- import { BarChart3, ChevronLeft, ChevronRight, FolderOpen } from 'lucide-react';
3
  import { Panel } from '../components/Panel';
4
  import { FileUpload } from '../components/FileUpload';
5
- import { Button } from '../components/Button';
6
- import { NumberInput } from '../components/NumberInput';
7
  import { GAResultsCard } from '../components/GAResultsCard';
8
  import { PreprocessingBadge } from '../components/PreprocessingBadge';
9
- import { estimateGestationalAge, getFilePreview, getFileType, isDicomFile, type GestationalAgeResponse, type PreprocessingInfo } from '../lib/api';
 
 
 
 
 
 
 
 
 
10
 
11
  export function GestationalAgePage() {
12
- // File state
 
 
 
13
  const [file, setFile] = useState<File | null>(null);
14
  const [preview, setPreview] = useState<string | null>(null);
15
  const [isLoadingPreview, setIsLoadingPreview] = useState(false);
16
-
17
  // Multiple files state (folder)
18
  const [files, setFiles] = useState<File[]>([]);
19
  const [currentIndex, setCurrentIndex] = useState(0);
20
-
 
 
 
 
21
  // Settings & results
22
  const [pixelSize, setPixelSize] = useState(0.1);
23
  const [results, setResults] = useState<GestationalAgeResponse | null>(null);
@@ -25,18 +39,40 @@ export function GestationalAgePage() {
25
  const [processedImage, setProcessedImage] = useState<string | null>(null);
26
  const [isLoading, setIsLoading] = useState(false);
27
  const [error, setError] = useState<string | null>(null);
28
-
29
  // Image view tab
30
  const [imageTab, setImageTab] = useState<'input' | 'processed'>('input');
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  const loadPreview = useCallback(async (selectedFile: File) => {
33
- // For regular images, create local preview
34
  if (!isDicomFile(selectedFile.name)) {
35
  setPreview(URL.createObjectURL(selectedFile));
36
  return;
37
  }
38
-
39
- // For DICOM, fetch preview from backend
40
  setIsLoadingPreview(true);
41
  try {
42
  const response = await getFilePreview(selectedFile);
@@ -60,18 +96,20 @@ export function GestationalAgePage() {
60
  setProcessedImage(null);
61
  setError(null);
62
  setImageTab('input');
 
 
63
  loadPreview(uploadedFile);
64
  }, [loadPreview]);
65
 
66
  const handleFolderUpload = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
67
  const fileList = e.target.files;
68
  if (!fileList) return;
69
-
70
- const validFiles = Array.from(fileList).filter(f =>
71
- f.type.startsWith('image/') ||
72
  isDicomFile(f.name)
73
  ).sort((a, b) => a.name.localeCompare(b.name));
74
-
75
  if (validFiles.length > 0) {
76
  setFiles(validFiles);
77
  setCurrentIndex(0);
@@ -81,20 +119,22 @@ export function GestationalAgePage() {
81
  setProcessedImage(null);
82
  setError(null);
83
  setImageTab('input');
 
 
84
  loadPreview(validFiles[0]);
85
  }
86
  }, [loadPreview]);
87
 
88
  const navigateImage = useCallback((direction: 'prev' | 'next') => {
89
  if (files.length === 0) return;
90
-
91
  let newIndex = currentIndex;
92
  if (direction === 'prev' && currentIndex > 0) {
93
  newIndex = currentIndex - 1;
94
  } else if (direction === 'next' && currentIndex < files.length - 1) {
95
  newIndex = currentIndex + 1;
96
  }
97
-
98
  if (newIndex !== currentIndex) {
99
  setCurrentIndex(newIndex);
100
  setFile(files[newIndex]);
@@ -102,22 +142,28 @@ export function GestationalAgePage() {
102
  setPreprocessingInfo(null);
103
  setProcessedImage(null);
104
  setImageTab('input');
 
 
105
  loadPreview(files[newIndex]);
106
  }
107
  }, [files, currentIndex, loadPreview]);
108
 
 
 
 
 
 
109
  const handleEstimate = async () => {
110
- if (!file) return;
111
 
112
  setIsLoading(true);
113
  setError(null);
114
 
115
  try {
116
- const response = await estimateGestationalAge(file, pixelSize);
117
  setResults(response);
118
  setPreprocessingInfo(response.preprocessing);
119
-
120
- // Use the processed image from backend
121
  if (response.preprocessing.processed_image_base64) {
122
  setProcessedImage(`data:image/png;base64,${response.preprocessing.processed_image_base64}`);
123
  }
@@ -133,180 +179,251 @@ export function GestationalAgePage() {
133
 
134
  const fileType = file ? getFileType(file.name) : null;
135
 
 
 
 
 
 
 
 
 
 
 
136
  return (
137
  <div className="flex flex-1 min-h-0 overflow-hidden">
138
- {/* Left Panel - Image */}
139
- <div className="w-1/2 border-r border-dark-border bg-white flex flex-col min-h-0">
140
- <Panel title="Image" className="flex-1 flex flex-col min-h-0">
141
- <div className="flex flex-col h-full gap-3 overflow-hidden">
142
- {/* Image Tabs */}
143
- <div className="flex-shrink-0 flex gap-1 bg-dark-input p-1 rounded-xl">
 
144
  <button
145
  onClick={() => setImageTab('input')}
146
- className={`flex-1 px-4 py-2 text-xs font-semibold rounded-lg transition-all ${
147
- imageTab === 'input'
148
- ? 'bg-nvidia-green text-white shadow-md'
149
- : 'text-text-secondary hover:text-text-primary hover:bg-white'
150
- }`}
151
  >
152
  Input
153
  </button>
154
  <button
155
  onClick={() => setImageTab('processed')}
156
  disabled={!processedImage}
157
- className={`flex-1 px-4 py-2 text-xs font-semibold rounded-lg transition-all ${
158
- imageTab === 'processed'
159
- ? 'bg-nvidia-green text-white shadow-md'
160
- : 'text-text-secondary hover:text-text-primary hover:bg-white disabled:opacity-40 disabled:cursor-not-allowed disabled:hover:bg-transparent'
161
- }`}
162
  >
163
  Processed
164
  </button>
165
  </div>
166
 
167
- {/* Image Display */}
168
- <div className="flex-1 min-h-0 overflow-hidden">
169
- {imageTab === 'input' ? (
170
- <FileUpload
171
- onUpload={handleSingleUpload}
172
- preview={preview}
173
- currentFile={file}
174
- isLoading={isLoadingPreview}
175
- />
176
- ) : (
177
- <div className="h-full w-full bg-slate-900 border border-dark-border rounded-xl overflow-hidden flex items-center justify-center">
178
- {processedImage ? (
179
- <img
180
- src={processedImage}
181
- alt="Processed"
182
- className="max-w-full max-h-full w-full h-full object-contain"
183
- />
184
- ) : (
185
- <p className="text-white/60 text-sm">Run estimation to see processed image</p>
186
- )}
187
- </div>
188
- )}
 
 
 
 
 
 
 
 
 
 
 
 
 
189
  </div>
 
 
190
 
191
- {/* Controls Section */}
192
- <div className="flex-shrink-0 space-y-3">
193
- {/* Folder Upload */}
194
- <label className="flex items-center gap-2 px-4 py-2.5 bg-dark-input border border-dark-border rounded-xl cursor-pointer hover:border-nvidia-green/50 hover:bg-nvidia-green/5 transition-all">
195
- <FolderOpen className="w-4 h-4 text-text-secondary" />
196
- <span className="text-sm text-text-secondary font-medium">Load folder...</span>
197
- <input
198
- type="file"
199
- webkitdirectory=""
200
- directory=""
201
- multiple
202
- className="hidden"
203
- onChange={handleFolderUpload}
204
- />
205
- </label>
206
-
207
- {/* Navigation */}
208
- {files.length > 1 && (
209
- <div className="flex items-center justify-between bg-dark-input rounded-xl px-4 py-2.5 border border-dark-border">
210
- <Button
211
- variant="secondary"
212
- onClick={() => navigateImage('prev')}
213
- disabled={currentIndex === 0}
214
- className="!px-2 !py-1 !shadow-none"
215
- >
216
- <ChevronLeft className="w-4 h-4" />
217
- </Button>
218
- <span className="text-sm text-text-secondary font-medium">
219
- {currentIndex + 1} / {files.length}
220
- </span>
221
- <Button
222
- variant="secondary"
223
- onClick={() => navigateImage('next')}
224
- disabled={currentIndex === files.length - 1}
225
- className="!px-2 !py-1 !shadow-none"
226
- >
227
- <ChevronRight className="w-4 h-4" />
228
- </Button>
229
- </div>
230
- )}
231
 
232
- {/* File name */}
233
- {file && (
234
- <div className="text-xs text-text-muted truncate px-1 font-medium">
235
- {file.name}
236
- </div>
237
- )}
 
 
 
 
 
 
 
 
238
 
239
- {/* Preprocessing Badge */}
240
- {(fileType || preprocessingInfo) && (
241
- <PreprocessingBadge
242
- info={preprocessingInfo}
243
- fileType={fileType}
244
- compact
245
- />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
  )}
247
 
248
- {/* Settings - Pixel Size (only for non-DICOM) */}
249
- {fileType !== 'dicom' && (
250
- <NumberInput
251
- label="Pixel Size"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
252
  value={pixelSize}
253
- onChange={setPixelSize}
254
- min={0.01}
255
- max={1}
256
- step={0.01}
257
- unit="mm/px"
258
- info="Physical size of each pixel"
259
- compact
260
  />
261
- )}
262
-
263
- {fileType === 'dicom' && (
264
- <div className="px-4 py-3 bg-nvidia-green/10 border border-nvidia-green/20 rounded-xl">
265
- <p className="text-xs text-nvidia-green font-medium">
266
- ✓ Pixel spacing will be extracted from DICOM metadata
267
- </p>
268
- </div>
269
- )}
270
 
271
- {/* Estimate Button */}
272
- <Button
 
273
  onClick={handleEstimate}
274
- disabled={!file}
275
- isLoading={isLoading}
276
- icon={<BarChart3 className="w-4 h-4" />}
 
 
277
  >
278
- Estimate Age
279
- </Button>
280
-
281
- {error && (
282
- <div className="p-3 bg-red-50 border border-red-200 rounded-xl">
283
- <p className="text-red-600 text-xs font-medium">{error}</p>
 
 
 
 
284
  </div>
285
  )}
286
  </div>
287
  </div>
288
- </Panel>
289
- </div>
290
 
291
- {/* Right Panel - Results */}
292
- <div className="w-1/2 bg-dark-bg flex flex-col min-h-0">
293
- <Panel
294
- title="Results"
295
- action={
296
- preprocessingInfo && (
297
- <span className={`text-xs px-2.5 py-1 rounded-full font-semibold ${
298
- preprocessingInfo.pipeline === 'full'
299
- ? 'bg-nvidia-green/10 text-nvidia-green'
300
- : 'bg-amber-500/10 text-amber-600'
301
  }`}>
302
- {preprocessingInfo.pipeline === 'full' ? 'Full Pipeline' : 'Basic Pipeline'}
303
- </span>
304
- )
305
- }
 
 
 
 
 
 
 
306
  className="flex-1 flex flex-col min-h-0"
307
  >
308
  <div className="flex-1 overflow-y-auto">
309
  <GAResultsCard results={results} isLoading={isLoading} />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
  </div>
311
  </Panel>
312
  </div>
 
1
+ import { useState, useCallback, useEffect } from 'react';
2
+ import { BarChart3, ChevronLeft, ChevronRight, FolderOpen, AlertTriangle, CheckCircle, Upload } from 'lucide-react';
3
  import { Panel } from '../components/Panel';
4
  import { FileUpload } from '../components/FileUpload';
 
 
5
  import { GAResultsCard } from '../components/GAResultsCard';
6
  import { PreprocessingBadge } from '../components/PreprocessingBadge';
7
+ import { useImageContext, GA_ELIGIBLE_VIEWS, GA_BIOMETRY_LABELS } from '../lib/ImageContext';
8
+ import {
9
+ estimateGestationalAge,
10
+ getFilePreview,
11
+ getFileType,
12
+ isDicomFile,
13
+ FETAL_VIEW_LABELS,
14
+ type GestationalAgeResponse,
15
+ type PreprocessingInfo
16
+ } from '../lib/api';
17
 
18
  export function GestationalAgePage() {
19
+ // Shared context
20
+ const imageContext = useImageContext();
21
+
22
+ // File state (local for this tab's independent uploads)
23
  const [file, setFile] = useState<File | null>(null);
24
  const [preview, setPreview] = useState<string | null>(null);
25
  const [isLoadingPreview, setIsLoadingPreview] = useState(false);
26
+
27
  // Multiple files state (folder)
28
  const [files, setFiles] = useState<File[]>([]);
29
  const [currentIndex, setCurrentIndex] = useState(0);
30
+
31
+ // View selection state
32
+ const [selectedView, setSelectedView] = useState<string>('');
33
+ const [viewSource, setViewSource] = useState<'classification' | 'corrected' | 'manual' | null>(null);
34
+
35
  // Settings & results
36
  const [pixelSize, setPixelSize] = useState(0.1);
37
  const [results, setResults] = useState<GestationalAgeResponse | null>(null);
 
39
  const [processedImage, setProcessedImage] = useState<string | null>(null);
40
  const [isLoading, setIsLoading] = useState(false);
41
  const [error, setError] = useState<string | null>(null);
42
+
43
  // Image view tab
44
  const [imageTab, setImageTab] = useState<'input' | 'processed'>('input');
45
 
46
+ // Sync with shared context when it has data from Classification tab
47
+ useEffect(() => {
48
+ if (imageContext.file && imageContext.classificationResults) {
49
+ setFile(imageContext.file);
50
+ setPreview(imageContext.preview);
51
+ setProcessedImage(imageContext.processedImage);
52
+ setFiles([]);
53
+ setCurrentIndex(0);
54
+ setResults(null);
55
+ setPreprocessingInfo(null);
56
+ setError(null);
57
+
58
+ if (imageContext.correctedView) {
59
+ setSelectedView(imageContext.correctedView);
60
+ setViewSource('corrected');
61
+ } else if (imageContext.predictedView) {
62
+ setSelectedView(imageContext.predictedView);
63
+ setViewSource('classification');
64
+ }
65
+ }
66
+ }, [imageContext.file, imageContext.classificationResults, imageContext.correctedView, imageContext.predictedView, imageContext.preview, imageContext.processedImage]);
67
+
68
+ const isViewEligible = selectedView ? GA_ELIGIBLE_VIEWS.includes(selectedView) : false;
69
+
70
  const loadPreview = useCallback(async (selectedFile: File) => {
 
71
  if (!isDicomFile(selectedFile.name)) {
72
  setPreview(URL.createObjectURL(selectedFile));
73
  return;
74
  }
75
+
 
76
  setIsLoadingPreview(true);
77
  try {
78
  const response = await getFilePreview(selectedFile);
 
96
  setProcessedImage(null);
97
  setError(null);
98
  setImageTab('input');
99
+ setSelectedView('');
100
+ setViewSource(null);
101
  loadPreview(uploadedFile);
102
  }, [loadPreview]);
103
 
104
  const handleFolderUpload = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
105
  const fileList = e.target.files;
106
  if (!fileList) return;
107
+
108
+ const validFiles = Array.from(fileList).filter(f =>
109
+ f.type.startsWith('image/') ||
110
  isDicomFile(f.name)
111
  ).sort((a, b) => a.name.localeCompare(b.name));
112
+
113
  if (validFiles.length > 0) {
114
  setFiles(validFiles);
115
  setCurrentIndex(0);
 
119
  setProcessedImage(null);
120
  setError(null);
121
  setImageTab('input');
122
+ setSelectedView('');
123
+ setViewSource(null);
124
  loadPreview(validFiles[0]);
125
  }
126
  }, [loadPreview]);
127
 
128
  const navigateImage = useCallback((direction: 'prev' | 'next') => {
129
  if (files.length === 0) return;
130
+
131
  let newIndex = currentIndex;
132
  if (direction === 'prev' && currentIndex > 0) {
133
  newIndex = currentIndex - 1;
134
  } else if (direction === 'next' && currentIndex < files.length - 1) {
135
  newIndex = currentIndex + 1;
136
  }
137
+
138
  if (newIndex !== currentIndex) {
139
  setCurrentIndex(newIndex);
140
  setFile(files[newIndex]);
 
142
  setPreprocessingInfo(null);
143
  setProcessedImage(null);
144
  setImageTab('input');
145
+ setSelectedView('');
146
+ setViewSource(null);
147
  loadPreview(files[newIndex]);
148
  }
149
  }, [files, currentIndex, loadPreview]);
150
 
151
+ const handleViewChange = (view: string) => {
152
+ setSelectedView(view);
153
+ setViewSource('manual');
154
+ };
155
+
156
  const handleEstimate = async () => {
157
+ if (!file || !selectedView || !isViewEligible) return;
158
 
159
  setIsLoading(true);
160
  setError(null);
161
 
162
  try {
163
+ const response = await estimateGestationalAge(file, pixelSize, selectedView);
164
  setResults(response);
165
  setPreprocessingInfo(response.preprocessing);
166
+
 
167
  if (response.preprocessing.processed_image_base64) {
168
  setProcessedImage(`data:image/png;base64,${response.preprocessing.processed_image_base64}`);
169
  }
 
179
 
180
  const fileType = file ? getFileType(file.name) : null;
181
 
182
+ const getButtonTooltip = () => {
183
+ if (!file) return 'Upload an image first';
184
+ if (!selectedView) return 'Select a view type';
185
+ if (!isViewEligible) return 'Only brain, abdomen, femur supported';
186
+ return '';
187
+ };
188
+
189
+ const isButtonDisabled = !file || !selectedView || !isViewEligible;
190
+ const displayImage = imageTab === 'processed' && processedImage ? processedImage : preview;
191
+
192
  return (
193
  <div className="flex flex-1 min-h-0 overflow-hidden">
194
+ {/* Left Panel - Image (60%) */}
195
+ <div className="w-3/5 border-r border-dark-border bg-slate-900 flex flex-col min-h-0">
196
+ {/* Compact Header */}
197
+ <div className="flex-shrink-0 px-4 py-2 bg-slate-800 border-b border-slate-700 flex items-center justify-between">
198
+ <div className="flex items-center gap-3">
199
+ {/* Image Tab Toggle */}
200
+ <div className="flex gap-1 bg-slate-700 p-0.5 rounded-lg">
201
  <button
202
  onClick={() => setImageTab('input')}
203
+ className={`px-3 py-1 text-xs font-medium rounded-md transition-all ${imageTab === 'input'
204
+ ? 'bg-nvidia-green text-white'
205
+ : 'text-slate-400 hover:text-white'
206
+ }`}
 
207
  >
208
  Input
209
  </button>
210
  <button
211
  onClick={() => setImageTab('processed')}
212
  disabled={!processedImage}
213
+ className={`px-3 py-1 text-xs font-medium rounded-md transition-all ${imageTab === 'processed'
214
+ ? 'bg-nvidia-green text-white'
215
+ : 'text-slate-400 hover:text-white disabled:opacity-40 disabled:cursor-not-allowed'
216
+ }`}
 
217
  >
218
  Processed
219
  </button>
220
  </div>
221
 
222
+ {/* File info */}
223
+ {file && (
224
+ <span className="text-xs text-slate-400 truncate max-w-[150px]">
225
+ {file.name}
226
+ </span>
227
+ )}
228
+
229
+ {/* DICOM badge */}
230
+ {fileType === 'dicom' && (
231
+ <span className="px-2 py-0.5 bg-nvidia-green/20 text-nvidia-green text-xs rounded-full font-medium">
232
+ DICOM
233
+ </span>
234
+ )}
235
+ </div>
236
+
237
+ {/* Folder navigation */}
238
+ {files.length > 1 && (
239
+ <div className="flex items-center gap-2">
240
+ <button
241
+ onClick={() => navigateImage('prev')}
242
+ disabled={currentIndex === 0}
243
+ className="p-1 text-slate-400 hover:text-white disabled:opacity-40"
244
+ >
245
+ <ChevronLeft className="w-4 h-4" />
246
+ </button>
247
+ <span className="text-xs text-slate-400">
248
+ {currentIndex + 1}/{files.length}
249
+ </span>
250
+ <button
251
+ onClick={() => navigateImage('next')}
252
+ disabled={currentIndex === files.length - 1}
253
+ className="p-1 text-slate-400 hover:text-white disabled:opacity-40"
254
+ >
255
+ <ChevronRight className="w-4 h-4" />
256
+ </button>
257
  </div>
258
+ )}
259
+ </div>
260
 
261
+ {/* Image Display - fills remaining space */}
262
+ <div className="flex-1 min-h-0 p-4">
263
+ {displayImage ? (
264
+ <img
265
+ src={displayImage}
266
+ alt="Ultrasound"
267
+ className="w-full h-full object-contain rounded-lg"
268
+ />
269
+ ) : (
270
+ <FileUpload
271
+ onUpload={handleSingleUpload}
272
+ preview={null}
273
+ currentFile={null}
274
+ isLoading={isLoadingPreview}
275
+ />
276
+ )}
277
+ </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
278
 
279
+ {/* Compact Control Bar */}
280
+ <div className="flex-shrink-0 px-4 py-3 bg-slate-800 border-t border-slate-700">
281
+ <div className="flex items-center gap-3">
282
+ {/* Upload/Folder buttons */}
283
+ <label className="flex items-center gap-1.5 px-3 py-1.5 bg-slate-700 hover:bg-slate-600 rounded-lg cursor-pointer transition-colors">
284
+ <Upload className="w-3.5 h-3.5 text-slate-300" />
285
+ <span className="text-xs text-slate-300 font-medium">Upload</span>
286
+ <input
287
+ type="file"
288
+ accept="image/*,.dcm,.dicom"
289
+ className="hidden"
290
+ onChange={(e) => e.target.files?.[0] && handleSingleUpload(e.target.files[0])}
291
+ />
292
+ </label>
293
 
294
+ <label className="flex items-center gap-1.5 px-3 py-1.5 bg-slate-700 hover:bg-slate-600 rounded-lg cursor-pointer transition-colors">
295
+ <FolderOpen className="w-3.5 h-3.5 text-slate-300" />
296
+ <span className="text-xs text-slate-300 font-medium">Folder</span>
297
+ <input
298
+ type="file"
299
+ webkitdirectory=""
300
+ directory=""
301
+ multiple
302
+ className="hidden"
303
+ onChange={handleFolderUpload}
304
+ />
305
+ </label>
306
+
307
+ <div className="w-px h-6 bg-slate-600" />
308
+
309
+ {/* View Selector */}
310
+ <div className="flex items-center gap-2 flex-1">
311
+ {viewSource && (
312
+ <span className={`text-xs px-1.5 py-0.5 rounded ${viewSource === 'corrected' ? 'bg-blue-500/20 text-blue-400' :
313
+ viewSource === 'classification' ? 'bg-green-500/20 text-green-400' :
314
+ 'bg-slate-600 text-slate-400'
315
+ }`}>
316
+ {viewSource === 'corrected' ? 'Corrected' :
317
+ viewSource === 'classification' ? 'Auto' : 'Manual'}
318
+ </span>
319
  )}
320
 
321
+ <select
322
+ value={selectedView}
323
+ onChange={(e) => handleViewChange(e.target.value)}
324
+ className="flex-1 px-2 py-1.5 bg-slate-700 border border-slate-600 rounded-lg text-xs text-white focus:outline-none focus:ring-1 focus:ring-nvidia-green"
325
+ >
326
+ <option value="">Select view...</option>
327
+ <optgroup label="GA-Eligible">
328
+ {GA_ELIGIBLE_VIEWS.map((view) => (
329
+ <option key={view} value={view}>
330
+ {view} ({GA_BIOMETRY_LABELS[view]})
331
+ </option>
332
+ ))}
333
+ </optgroup>
334
+ <optgroup label="Other">
335
+ {FETAL_VIEW_LABELS.filter(v => !GA_ELIGIBLE_VIEWS.includes(v)).map((view) => (
336
+ <option key={view} value={view}>{view}</option>
337
+ ))}
338
+ </optgroup>
339
+ </select>
340
+ </div>
341
+
342
+ {/* Pixel size (non-DICOM only) */}
343
+ {file && fileType !== 'dicom' && (
344
+ <div className="flex items-center gap-1.5">
345
+ <span className="text-xs text-slate-400">px:</span>
346
+ <input
347
+ type="number"
348
  value={pixelSize}
349
+ onChange={(e) => setPixelSize(parseFloat(e.target.value) || 0.1)}
350
+ step="0.01"
351
+ min="0.01"
352
+ max="1"
353
+ className="w-16 px-2 py-1 bg-slate-700 border border-slate-600 rounded text-xs text-white"
 
 
354
  />
355
+ <span className="text-xs text-slate-500">mm</span>
356
+ </div>
357
+ )}
 
 
 
 
 
 
358
 
359
+ {/* Estimate Button */}
360
+ <div className="relative group">
361
+ <button
362
  onClick={handleEstimate}
363
+ disabled={isButtonDisabled || isLoading}
364
+ className={`flex items-center gap-2 px-4 py-1.5 rounded-lg text-sm font-semibold transition-all ${isButtonDisabled
365
+ ? 'bg-slate-600 text-slate-400 cursor-not-allowed'
366
+ : 'bg-nvidia-green text-white hover:bg-nvidia-green-hover shadow-lg'
367
+ }`}
368
  >
369
+ {isLoading ? (
370
+ <div className="w-4 h-4 border-2 border-white/30 border-t-white rounded-full animate-spin" />
371
+ ) : (
372
+ <BarChart3 className="w-4 h-4" />
373
+ )}
374
+ Estimate
375
+ </button>
376
+ {isButtonDisabled && getButtonTooltip() && (
377
+ <div className="absolute bottom-full left-1/2 -translate-x-1/2 mb-2 px-2 py-1 bg-slate-900 text-white text-xs rounded opacity-0 group-hover:opacity-100 transition-opacity whitespace-nowrap">
378
+ {getButtonTooltip()}
379
  </div>
380
  )}
381
  </div>
382
  </div>
 
 
383
 
384
+ {/* Error/Warning row */}
385
+ {(error || (selectedView && !isViewEligible)) && (
386
+ <div className={`mt-2 p-2 rounded-lg flex items-center gap-2 text-xs ${error ? 'bg-red-500/10 text-red-400' : 'bg-amber-500/10 text-amber-400'
 
 
 
 
 
 
 
387
  }`}>
388
+ <AlertTriangle className="w-3.5 h-3.5 flex-shrink-0" />
389
+ {error || 'Only brain, abdomen, and femur views are supported for GA estimation'}
390
+ </div>
391
+ )}
392
+ </div>
393
+ </div>
394
+
395
+ {/* Right Panel - Results (40%) */}
396
+ <div className="w-2/5 bg-white flex flex-col min-h-0">
397
+ <Panel
398
+ title="Results"
399
  className="flex-1 flex flex-col min-h-0"
400
  >
401
  <div className="flex-1 overflow-y-auto">
402
  <GAResultsCard results={results} isLoading={isLoading} />
403
+
404
+ {/* View info in results */}
405
+ {results && selectedView && (
406
+ <div className="mt-4 p-3 bg-slate-50 rounded-xl border border-slate-200">
407
+ <div className="flex items-center gap-2 mb-2">
408
+ <CheckCircle className="w-4 h-4 text-nvidia-green" />
409
+ <span className="text-sm font-medium text-slate-700">Estimation Complete</span>
410
+ </div>
411
+ <div className="space-y-1 text-xs text-slate-600">
412
+ <p><span className="font-medium">View:</span> {selectedView}</p>
413
+ <p><span className="font-medium">Biometry:</span> {GA_BIOMETRY_LABELS[selectedView]}</p>
414
+ {preprocessingInfo?.metadata?.pixel_spacing && (
415
+ <p><span className="font-medium">Pixel Spacing:</span> {preprocessingInfo.metadata.pixel_spacing.toFixed(3)} mm/px</p>
416
+ )}
417
+ </div>
418
+ </div>
419
+ )}
420
+
421
+ {/* Preprocessing Badge - same style as Classification */}
422
+ {preprocessingInfo && (
423
+ <div className="mt-4">
424
+ <PreprocessingBadge info={preprocessingInfo} fileType={fileType} />
425
+ </div>
426
+ )}
427
  </div>
428
  </Panel>
429
  </div>