KingmaoQ commited on
Commit
b959f4f
·
1 Parent(s): 2a06e38

Add ovarian CT progression viewer

Browse files
RL0910/demo_imaging_cases/cases/TCGA-25-1328/manifest.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "format": "rldt_ov_imaging_package",
3
+ "version": 1,
4
+ "case_id": "TCGA-25-1328",
5
+ "patient_id": "TCGA-25-1328",
6
+ "display_name": "TCGA-OV TCGA-25-1328",
7
+ "modality": "CT",
8
+ "summary_note": "Existing progression visualization built from preprocessed TCGA-OV CT series. Lesion highlighting is heuristic when no label is available.",
9
+ "timepoints": [
10
+ {
11
+ "timepoint_id": "t01",
12
+ "label": "01-16-1986-CT ChestAbdomenPelv-10718",
13
+ "relative_time": 1.0,
14
+ "asset_path": "tp_01.json",
15
+ "series_description": "AXIAL",
16
+ "num_slices_original": 144,
17
+ "spacing_zyx_mm": [
18
+ 4.468531440559441,
19
+ 0.703125,
20
+ 0.703125
21
+ ],
22
+ "lesion_confidence": 0.42,
23
+ "lesion_source": "heuristic_component",
24
+ "lesion_voxel_count": 1015
25
+ },
26
+ {
27
+ "timepoint_id": "t02",
28
+ "label": "04-12-1985-Abdomen1 AP ROUTINE-14165",
29
+ "relative_time": 4.0,
30
+ "asset_path": "tp_02.json",
31
+ "series_description": "AP Routine 5.0 B40f",
32
+ "num_slices_original": 90,
33
+ "spacing_zyx_mm": [
34
+ 5.0,
35
+ 0.703125,
36
+ 0.703125
37
+ ],
38
+ "lesion_confidence": 0.42,
39
+ "lesion_source": "heuristic_component",
40
+ "lesion_voxel_count": 4034
41
+ },
42
+ {
43
+ "timepoint_id": "t03",
44
+ "label": "08-23-1986-Abdomen01APRoutine Adult-21186",
45
+ "relative_time": 8.0,
46
+ "asset_path": "tp_03.json",
47
+ "series_description": "AP Routine 5.0 B40f",
48
+ "num_slices_original": 96,
49
+ "spacing_zyx_mm": [
50
+ 5.0,
51
+ 0.78125,
52
+ 0.78125
53
+ ],
54
+ "lesion_confidence": 0.42,
55
+ "lesion_source": "heuristic_component",
56
+ "lesion_voxel_count": 3512
57
+ },
58
+ {
59
+ "timepoint_id": "t04",
60
+ "label": "12-26-1986-CT ABDOMEN PELVIS-83459",
61
+ "relative_time": 12.0,
62
+ "asset_path": "tp_04.json",
63
+ "series_description": "AXIAL",
64
+ "num_slices_original": 67,
65
+ "spacing_zyx_mm": [
66
+ 6.893939560606061,
67
+ 0.78125,
68
+ 0.78125
69
+ ],
70
+ "lesion_confidence": 0.42,
71
+ "lesion_source": "heuristic_component",
72
+ "lesion_voxel_count": 683
73
+ }
74
+ ]
75
+ }
RL0910/demo_imaging_cases/cases/TCGA-25-1328/tp_01.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1328/tp_02.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1328/tp_03.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1328/tp_04.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1633/manifest.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "format": "rldt_ov_imaging_package",
3
+ "version": 1,
4
+ "case_id": "TCGA-25-1633",
5
+ "patient_id": "TCGA-25-1633",
6
+ "display_name": "TCGA-OV TCGA-25-1633",
7
+ "modality": "CT",
8
+ "summary_note": "Existing progression visualization built from preprocessed TCGA-OV CT series. Lesion highlighting is heuristic when no label is available.",
9
+ "timepoints": [
10
+ {
11
+ "timepoint_id": "t01",
12
+ "label": "02-21-1987-Abdomen020APRoutine Adult-53316",
13
+ "relative_time": 2.0,
14
+ "asset_path": "tp_01.json",
15
+ "series_description": "AP Routine 3.0 SPO cor",
16
+ "num_slices_original": 97,
17
+ "spacing_zyx_mm": [
18
+ 2.5,
19
+ 0.732421875,
20
+ 0.732421875
21
+ ],
22
+ "lesion_confidence": 0.42,
23
+ "lesion_source": "heuristic_component",
24
+ "lesion_voxel_count": 1050
25
+ },
26
+ {
27
+ "timepoint_id": "t02",
28
+ "label": "04-28-1988-Abdomen020APRoutine Adult-77184",
29
+ "relative_time": 4.0,
30
+ "asset_path": "tp_02.json",
31
+ "series_description": "AP Routine 5.0 B40f",
32
+ "num_slices_original": 82,
33
+ "spacing_zyx_mm": [
34
+ 5.0,
35
+ 0.703125,
36
+ 0.703125
37
+ ],
38
+ "lesion_confidence": 0.42,
39
+ "lesion_source": "heuristic_component",
40
+ "lesion_voxel_count": 1385
41
+ },
42
+ {
43
+ "timepoint_id": "t03",
44
+ "label": "08-02-1987-Abdomen020APRoutine Adult-13930",
45
+ "relative_time": 8.0,
46
+ "asset_path": "tp_03.json",
47
+ "series_description": "AP routine 5.0 B40f",
48
+ "num_slices_original": 86,
49
+ "spacing_zyx_mm": [
50
+ 5.0,
51
+ 0.6640625,
52
+ 0.6640625
53
+ ],
54
+ "lesion_confidence": 0.42,
55
+ "lesion_source": "heuristic_component",
56
+ "lesion_voxel_count": 2379
57
+ },
58
+ {
59
+ "timepoint_id": "t04",
60
+ "label": "12-27-1986-CT ABDOMEN PELVIS-23762",
61
+ "relative_time": 12.0,
62
+ "asset_path": "tp_04.json",
63
+ "series_description": "AXIAL",
64
+ "num_slices_original": 82,
65
+ "spacing_zyx_mm": [
66
+ 5.000000024691358,
67
+ 0.742188,
68
+ 0.742188
69
+ ],
70
+ "lesion_confidence": 0.42,
71
+ "lesion_source": "heuristic_component",
72
+ "lesion_voxel_count": 19345
73
+ }
74
+ ]
75
+ }
RL0910/demo_imaging_cases/cases/TCGA-25-1633/tp_01.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1633/tp_02.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1633/tp_03.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-1633/tp_04.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-2393/manifest.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "format": "rldt_ov_imaging_package",
3
+ "version": 1,
4
+ "case_id": "TCGA-25-2393",
5
+ "patient_id": "TCGA-25-2393",
6
+ "display_name": "TCGA-OV TCGA-25-2393",
7
+ "modality": "CT",
8
+ "summary_note": "Existing progression visualization built from preprocessed TCGA-OV CT series. Lesion highlighting is heuristic when no label is available.",
9
+ "timepoints": [
10
+ {
11
+ "timepoint_id": "t01",
12
+ "label": "01-17-1987-Abdomen020APRoutine Adult-74338",
13
+ "relative_time": 1.0,
14
+ "asset_path": "tp_01.json",
15
+ "series_description": "AP Routine 3.0 SPO cor",
16
+ "num_slices_original": 128,
17
+ "spacing_zyx_mm": [
18
+ 2.5,
19
+ 0.890625,
20
+ 0.890625
21
+ ],
22
+ "lesion_confidence": 0.22,
23
+ "lesion_source": "heuristic_ellipsoid",
24
+ "lesion_voxel_count": 2353
25
+ },
26
+ {
27
+ "timepoint_id": "t02",
28
+ "label": "04-27-1988-Abdomen020APRoutine Adult-64382",
29
+ "relative_time": 4.0,
30
+ "asset_path": "tp_02.json",
31
+ "series_description": "AP routine 5.0 B40f",
32
+ "num_slices_original": 80,
33
+ "spacing_zyx_mm": [
34
+ 5.0,
35
+ 0.859375,
36
+ 0.859375
37
+ ],
38
+ "lesion_confidence": 0.42,
39
+ "lesion_source": "heuristic_component",
40
+ "lesion_voxel_count": 310
41
+ },
42
+ {
43
+ "timepoint_id": "t03",
44
+ "label": "08-15-1985-CT ABDOMEN PELVIS-61715",
45
+ "relative_time": 8.0,
46
+ "asset_path": "tp_03.json",
47
+ "series_description": "AXIAL",
48
+ "num_slices_original": 18,
49
+ "spacing_zyx_mm": [
50
+ 5.764705882352941,
51
+ 0.859375,
52
+ 0.859375
53
+ ],
54
+ "lesion_confidence": 0.42,
55
+ "lesion_source": "heuristic_component",
56
+ "lesion_voxel_count": 429
57
+ },
58
+ {
59
+ "timepoint_id": "t04",
60
+ "label": "12-02-1986-CT ABDOMEN PELVIS-18560",
61
+ "relative_time": 12.0,
62
+ "asset_path": "tp_04.json",
63
+ "series_description": "AXIAL",
64
+ "num_slices_original": 97,
65
+ "spacing_zyx_mm": [
66
+ 5.000000114583333,
67
+ 0.859375,
68
+ 0.859375
69
+ ],
70
+ "lesion_confidence": 0.42,
71
+ "lesion_source": "heuristic_component",
72
+ "lesion_voxel_count": 3104
73
+ }
74
+ ]
75
+ }
RL0910/demo_imaging_cases/cases/TCGA-25-2393/tp_01.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-2393/tp_02.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-2393/tp_03.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/cases/TCGA-25-2393/tp_04.json ADDED
The diff for this file is too large to render. See raw diff
 
RL0910/demo_imaging_cases/manifest.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "format": "rldt_ov_imaging_package",
3
+ "version": 1,
4
+ "cases": [
5
+ {
6
+ "case_id": "TCGA-25-1633",
7
+ "display_name": "TCGA-OV TCGA-25-1633",
8
+ "timepoint_count": 4
9
+ },
10
+ {
11
+ "case_id": "TCGA-25-1328",
12
+ "display_name": "TCGA-OV TCGA-25-1328",
13
+ "timepoint_count": 4
14
+ },
15
+ {
16
+ "case_id": "TCGA-25-2393",
17
+ "display_name": "TCGA-OV TCGA-25-2393",
18
+ "timepoint_count": 4
19
+ }
20
+ ]
21
+ }
RL0910/enhanced_chat_ui.py CHANGED
@@ -57,6 +57,13 @@ from online_monitor import OnlineSystemMonitor
57
  from system_health_check import SystemHealthChecker
58
 
59
  from drive_tools import load_data_source, generate_patient_report, generate_patient_report_ui
 
 
 
 
 
 
 
60
 
61
  # ---- Optional BCQ loader ----
62
  def _load_bcq_policy(path: str):
@@ -788,6 +795,16 @@ def create_gradio_interface():
788
  - 🤖 LLM Co-Pilot: Context-aware help available on every page
789
  """)
790
 
 
 
 
 
 
 
 
 
 
 
791
  with gr.Row():
792
  with gr.Column(scale=4, min_width=720):
793
  with gr.Tabs():
@@ -1001,7 +1018,69 @@ def create_gradio_interface():
1001
 
1002
  export_output = gr.File(label="Exported Data", visible=False)
1003
 
1004
- # Tab 2: Parameter Control
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1005
  with gr.Tab("⚙️ Parameter Control"):
1006
  gr.Markdown("### Model Parameter Configuration")
1007
 
@@ -2785,6 +2864,69 @@ def create_gradio_interface():
2785
  inputs=[patient_dropdown],
2786
  outputs=[export_output]
2787
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2788
 
2789
  # Chat events with patient context
2790
  msg.submit(
@@ -3296,6 +3438,22 @@ def create_gradio_interface():
3296
  """
3297
  )
3298
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3299
  return demo
3300
 
3301
 
 
57
  from system_health_check import SystemHealthChecker
58
 
59
  from drive_tools import load_data_source, generate_patient_report, generate_patient_report_ui
60
+ from imaging_viewer import (
61
+ imaging_upload_requirements_md,
62
+ initialize_demo_view,
63
+ load_imaging_case,
64
+ select_gallery_slice,
65
+ update_imaging_timepoint,
66
+ )
67
 
68
  # ---- Optional BCQ loader ----
69
  def _load_bcq_policy(path: str):
 
795
  - 🤖 LLM Co-Pilot: Context-aware help available on every page
796
  """)
797
 
798
+ with gr.Row():
799
+ with gr.Column(scale=4):
800
+ gr.Markdown("")
801
+ with gr.Column(scale=1, min_width=240):
802
+ open_imaging_workspace_btn = _register_button_with_help(
803
+ "🩻 Open Ovarian Imaging Viewer",
804
+ "Jump to the CT progression viewer for preloaded TCGA-OV imaging cases or uploaded imaging packages.",
805
+ variant="primary",
806
+ )
807
+
808
  with gr.Row():
809
  with gr.Column(scale=4, min_width=720):
810
  with gr.Tabs():
 
1018
 
1019
  export_output = gr.File(label="Exported Data", visible=False)
1020
 
1021
+ # Tab 2: Ovarian Imaging Viewer
1022
+ with gr.Tab("🩻 Ovarian Imaging Viewer") as imaging_tab:
1023
+ gr.Markdown("### Ovarian Cancer Existing Progression Viewer")
1024
+ gr.Markdown(
1025
+ "> Existing progression visualization only. This workspace shows preloaded TCGA-OV CT time series or uploaded RLDT imaging packages, with heuristic lesion highlighting when no mask is available."
1026
+ )
1027
+
1028
+ with gr.Row():
1029
+ imaging_source_mode = gr.Radio(
1030
+ choices=["Demo Cases", "Upload Package"],
1031
+ value="Demo Cases",
1032
+ label="Imaging source",
1033
+ info="Use the preloaded TCGA-OV cases or upload one packaged case for visualization."
1034
+ )
1035
+ imaging_case_dropdown = gr.Dropdown(
1036
+ choices=[],
1037
+ value=None,
1038
+ label="Demo case",
1039
+ info="Select one of the preloaded TCGA-OV longitudinal imaging cases.",
1040
+ )
1041
+ imaging_timepoint_dropdown = gr.Dropdown(
1042
+ choices=[],
1043
+ value=None,
1044
+ label="Timepoint",
1045
+ info="Switch between available visits for the active case.",
1046
+ )
1047
+
1048
+ with gr.Row():
1049
+ with gr.Column(scale=2):
1050
+ imaging_upload = gr.File(
1051
+ label="Upload Imaging Package (.zip)",
1052
+ file_types=[".zip"],
1053
+ visible=False,
1054
+ )
1055
+ with gr.Column(scale=1):
1056
+ imaging_load_btn = _register_button_with_help(
1057
+ "Load Imaging Case",
1058
+ "Load the selected demo case or uploaded imaging package into the progression viewer.",
1059
+ variant="primary",
1060
+ )
1061
+
1062
+ imaging_requirements = gr.Markdown(imaging_upload_requirements_md())
1063
+ imaging_case_info = gr.Markdown("No imaging case loaded yet.")
1064
+
1065
+ with gr.Row():
1066
+ with gr.Column(scale=3, min_width=640):
1067
+ imaging_plot = gr.Plot(label="3D Existing Progression View")
1068
+ with gr.Column(scale=2, min_width=360):
1069
+ imaging_timeline_plot = gr.Plot(label="Progression Timeline")
1070
+ imaging_slice_detail = gr.Image(label="Selected Slice", visible=False)
1071
+ imaging_slice_markdown = gr.Markdown("Load a case to inspect slices.")
1072
+
1073
+ imaging_gallery = gr.Gallery(
1074
+ label="Representative Axial Slices",
1075
+ columns=4,
1076
+ rows=2,
1077
+ height=320,
1078
+ preview=False,
1079
+ )
1080
+ imaging_state = gr.State({})
1081
+ imaging_slice_indices = gr.State([])
1082
+
1083
+ # Tab 3: Parameter Control
1084
  with gr.Tab("⚙️ Parameter Control"):
1085
  gr.Markdown("### Model Parameter Configuration")
1086
 
 
2864
  inputs=[patient_dropdown],
2865
  outputs=[export_output]
2866
  )
2867
+
2868
+ def _toggle_imaging_source(mode):
2869
+ use_upload = mode == "Upload Package"
2870
+ return (
2871
+ gr.update(visible=use_upload),
2872
+ gr.update(visible=not use_upload),
2873
+ )
2874
+
2875
+ imaging_source_mode.change(
2876
+ _toggle_imaging_source,
2877
+ inputs=[imaging_source_mode],
2878
+ outputs=[imaging_upload, imaging_case_dropdown],
2879
+ )
2880
+
2881
+ imaging_load_btn.click(
2882
+ load_imaging_case,
2883
+ inputs=[imaging_source_mode, imaging_upload, imaging_case_dropdown, imaging_timepoint_dropdown],
2884
+ outputs=[
2885
+ imaging_case_dropdown,
2886
+ imaging_timepoint_dropdown,
2887
+ imaging_case_info,
2888
+ imaging_plot,
2889
+ imaging_gallery,
2890
+ imaging_timeline_plot,
2891
+ imaging_slice_markdown,
2892
+ imaging_slice_detail,
2893
+ imaging_state,
2894
+ imaging_slice_indices,
2895
+ ],
2896
+ )
2897
+
2898
+ imaging_timepoint_dropdown.change(
2899
+ update_imaging_timepoint,
2900
+ inputs=[imaging_state, imaging_timepoint_dropdown],
2901
+ outputs=[
2902
+ imaging_case_info,
2903
+ imaging_plot,
2904
+ imaging_gallery,
2905
+ imaging_timeline_plot,
2906
+ imaging_slice_markdown,
2907
+ imaging_slice_detail,
2908
+ imaging_state,
2909
+ imaging_slice_indices,
2910
+ ],
2911
+ )
2912
+
2913
+ imaging_gallery.select(
2914
+ select_gallery_slice,
2915
+ inputs=[imaging_state, imaging_slice_indices],
2916
+ outputs=[imaging_slice_detail, imaging_slice_markdown],
2917
+ )
2918
+
2919
+ open_imaging_workspace_btn.click(
2920
+ lambda: None,
2921
+ outputs=[],
2922
+ queue=False,
2923
+ js="""() => {
2924
+ const buttons = Array.from(document.querySelectorAll('button'));
2925
+ const target = buttons.find((el) => el.innerText && el.innerText.trim() === '🩻 Ovarian Imaging Viewer');
2926
+ if (target) { target.click(); }
2927
+ return [];
2928
+ }""",
2929
+ )
2930
 
2931
  # Chat events with patient context
2932
  msg.submit(
 
3438
  """
3439
  )
3440
 
3441
+ demo.load(
3442
+ initialize_demo_view,
3443
+ outputs=[
3444
+ imaging_case_dropdown,
3445
+ imaging_timepoint_dropdown,
3446
+ imaging_case_info,
3447
+ imaging_plot,
3448
+ imaging_gallery,
3449
+ imaging_timeline_plot,
3450
+ imaging_slice_markdown,
3451
+ imaging_slice_detail,
3452
+ imaging_state,
3453
+ imaging_slice_indices,
3454
+ ],
3455
+ )
3456
+
3457
  return demo
3458
 
3459
 
RL0910/imaging_viewer.py ADDED
@@ -0,0 +1,403 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import io
5
+ import json
6
+ import math
7
+ import tempfile
8
+ import zipfile
9
+ import zlib
10
+ from functools import lru_cache
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ import gradio as gr
15
+ import numpy as np
16
+ import plotly.graph_objects as go
17
+ from PIL import Image, ImageDraw
18
+ from scipy import ndimage as ndi
19
+
20
+
21
+ MODULE_DIR = Path(__file__).resolve().parent
22
+ DEMO_ROOT = MODULE_DIR / "demo_imaging_cases"
23
+ UPLOAD_CACHE_ROOT = Path(tempfile.gettempdir()) / "rldt_ov_imaging_uploads"
24
+ PACKAGE_MAGIC = "rldt_ov_imaging_package"
25
+
26
+
27
+ def _json_load(path: Path) -> dict[str, Any]:
28
+ return json.loads(path.read_text(encoding="utf-8"))
29
+
30
+
31
+ @lru_cache(maxsize=32)
32
+ def _load_asset(asset_path: str) -> dict[str, Any]:
33
+ path = Path(asset_path)
34
+ if path.suffix == ".npz":
35
+ data = np.load(path, allow_pickle=False)
36
+ return {key: data[key] for key in data.files}
37
+ payload = json.loads(path.read_text(encoding="utf-8"))
38
+ shape = tuple(payload["shape"])
39
+ volume_bytes = zlib.decompress(base64.b85decode(payload["volume_u8_b85"].encode("ascii")))
40
+ lesion_bytes = zlib.decompress(base64.b85decode(payload["lesion_mask_b85"].encode("ascii")))
41
+ return {
42
+ "volume_u8": np.frombuffer(volume_bytes, dtype=np.uint8).reshape(shape),
43
+ "lesion_mask": np.frombuffer(lesion_bytes, dtype=np.uint8).reshape(shape),
44
+ "roi_center_zyx": np.array(payload.get("roi_center_zyx", [shape[0] // 2, shape[1] // 2, shape[2] // 2]), dtype=np.int16),
45
+ }
46
+
47
+
48
+ def list_demo_cases() -> list[dict[str, Any]]:
49
+ manifest_path = DEMO_ROOT / "manifest.json"
50
+ if not manifest_path.exists():
51
+ return []
52
+ manifest = _json_load(manifest_path)
53
+ return manifest.get("cases", [])
54
+
55
+
56
+ def _load_case_manifest(case_root: Path) -> dict[str, Any]:
57
+ return _json_load(case_root / "manifest.json")
58
+
59
+
60
+ def _body_mask_from_volume(volume_u8: np.ndarray) -> np.ndarray:
61
+ body = volume_u8 > 12
62
+ if not body.any():
63
+ return body
64
+ labels, n = ndi.label(body)
65
+ if n <= 0:
66
+ return body
67
+ sizes = np.bincount(labels.ravel())
68
+ sizes[0] = 0
69
+ return labels == int(sizes.argmax())
70
+
71
+
72
+ def _sample_points(mask: np.ndarray, max_points: int) -> np.ndarray:
73
+ coords = np.argwhere(mask)
74
+ if len(coords) == 0:
75
+ return np.zeros((0, 3), dtype=np.int16)
76
+ if len(coords) <= max_points:
77
+ return coords.astype(np.int16)
78
+ step = max(1, len(coords) // max_points)
79
+ return coords[::step][:max_points].astype(np.int16)
80
+
81
+
82
+ def _surface_points(body_mask: np.ndarray, max_points: int = 4500) -> np.ndarray:
83
+ eroded = ndi.binary_erosion(body_mask, iterations=1)
84
+ surface = body_mask & (~eroded)
85
+ return _sample_points(surface, max_points=max_points)
86
+
87
+
88
+ def _lesion_points(lesion_mask: np.ndarray, max_points: int = 2200) -> np.ndarray:
89
+ return _sample_points(lesion_mask > 0, max_points=max_points)
90
+
91
+
92
+ def _region_name(z: int, y: int, x: int, shape: tuple[int, int, int]) -> tuple[str, str]:
93
+ d, h, w = shape
94
+ zf = z / max(d - 1, 1)
95
+ xf = x / max(w - 1, 1)
96
+ if zf < 0.35:
97
+ cranio = "upper abdomen"
98
+ note = "Liver, stomach, and upper peritoneal cavity are more likely in this level."
99
+ elif zf < 0.68:
100
+ cranio = "mid abdomen"
101
+ note = "Bowel loops, mesentery, and central peritoneal cavity are more likely in this level."
102
+ else:
103
+ cranio = "pelvis"
104
+ note = "Adnexal, uterine, bladder, and pelvic peritoneal structures are more likely in this level."
105
+ if xf < 0.4:
106
+ side = "left"
107
+ elif xf > 0.6:
108
+ side = "right"
109
+ else:
110
+ side = "midline"
111
+ return f"{side} {cranio}", note
112
+
113
+
114
+ def _volume_to_rgb(slice_img: np.ndarray, lesion_slice: np.ndarray | None = None) -> Image.Image:
115
+ base = np.stack([slice_img] * 3, axis=-1).astype(np.uint8)
116
+ if lesion_slice is not None and lesion_slice.any():
117
+ overlay = base.copy()
118
+ overlay[..., 0] = np.maximum(overlay[..., 0], lesion_slice.astype(np.uint8) * 255)
119
+ overlay[..., 1] = np.where(lesion_slice > 0, overlay[..., 1] // 3, overlay[..., 1])
120
+ overlay[..., 2] = np.where(lesion_slice > 0, overlay[..., 2] // 3, overlay[..., 2])
121
+ base = (0.5 * base + 0.5 * overlay).astype(np.uint8)
122
+ return Image.fromarray(base)
123
+
124
+
125
+ def _annotate_slice(img: Image.Image, label: str) -> Image.Image:
126
+ canvas = img.convert("RGB")
127
+ draw = ImageDraw.Draw(canvas)
128
+ draw.rounded_rectangle((10, 10, 250, 42), radius=8, fill=(20, 26, 42))
129
+ draw.text((18, 18), label, fill=(255, 255, 255))
130
+ return canvas
131
+
132
+
133
+ def _build_gallery_items(volume_u8: np.ndarray, lesion_mask: np.ndarray, center_z: int) -> tuple[list[tuple[Image.Image, str]], list[int]]:
134
+ d = volume_u8.shape[0]
135
+ indices = sorted({int(np.clip(center_z + delta, 0, d - 1)) for delta in (-12, -8, -4, 0, 4, 8, 12)})
136
+ gallery: list[tuple[Image.Image, str]] = []
137
+ for z in indices:
138
+ lesion_slice = lesion_mask[z] if lesion_mask is not None else None
139
+ img = _volume_to_rgb(volume_u8[z], lesion_slice)
140
+ img = _annotate_slice(img.resize((288, 288)), f"Axial slice {z}")
141
+ gallery.append((img, f"Slice {z}"))
142
+ return gallery, indices
143
+
144
+
145
+ def _slice_detail(volume_u8: np.ndarray, lesion_mask: np.ndarray, z_index: int) -> tuple[Image.Image, str]:
146
+ lesion_slice = lesion_mask[z_index] if lesion_mask is not None else None
147
+ img = _volume_to_rgb(volume_u8[z_index], lesion_slice)
148
+ img = _annotate_slice(img.resize((640, 640)), f"Axial slice {z_index}")
149
+ lesion_area = int(lesion_slice.sum()) if lesion_slice is not None else 0
150
+ desc = (
151
+ f"### Slice {z_index}\n"
152
+ f"- Overlay mode: heuristic lesion candidate\n"
153
+ f"- Highlighted pixels: {lesion_area}\n"
154
+ f"- View: axial CT slice enlarged from the progression viewer"
155
+ )
156
+ return img, desc
157
+
158
+
159
+ def _timeline_figure(case_manifest: dict[str, Any]) -> go.Figure:
160
+ xs = []
161
+ ys = []
162
+ labels = []
163
+ for tp in case_manifest.get("timepoints", []):
164
+ xs.append(tp.get("relative_time", len(xs) + 1))
165
+ ys.append(tp.get("lesion_voxel_count", 0))
166
+ labels.append(tp.get("label", tp.get("timepoint_id", "t")))
167
+ fig = go.Figure()
168
+ fig.add_trace(
169
+ go.Scatter(
170
+ x=xs,
171
+ y=ys,
172
+ mode="lines+markers",
173
+ line=dict(color="#6366f1", width=3),
174
+ marker=dict(size=9, color="#ef4444"),
175
+ text=labels,
176
+ hovertemplate="Timepoint %{text}<br>Relative time %{x}<br>Candidate lesion voxels %{y}<extra></extra>",
177
+ )
178
+ )
179
+ fig.update_layout(
180
+ title="Existing Progression Timeline",
181
+ margin=dict(l=20, r=20, t=48, b=20),
182
+ height=280,
183
+ xaxis_title="Relative time",
184
+ yaxis_title="Candidate lesion volume proxy",
185
+ template="plotly_white",
186
+ )
187
+ return fig
188
+
189
+
190
+ def _viewer_plot(volume_u8: np.ndarray, lesion_mask: np.ndarray, timepoint_meta: dict[str, Any]) -> go.Figure:
191
+ body = _body_mask_from_volume(volume_u8)
192
+ body_pts = _surface_points(body)
193
+ lesion_pts = _lesion_points(lesion_mask)
194
+ d, h, w = volume_u8.shape
195
+ fig = go.Figure()
196
+
197
+ if len(body_pts) > 0:
198
+ texts = []
199
+ colors = []
200
+ for z, y, x in body_pts:
201
+ region, note = _region_name(int(z), int(y), int(x), volume_u8.shape)
202
+ intensity = int(volume_u8[int(z), int(y), int(x)])
203
+ texts.append(
204
+ f"<b>Body surface sample</b><br>Region: {region}<br>Display intensity: {intensity}<br>{note}"
205
+ )
206
+ colors.append(z)
207
+ fig.add_trace(
208
+ go.Scatter3d(
209
+ x=body_pts[:, 2],
210
+ y=body_pts[:, 1],
211
+ z=body_pts[:, 0],
212
+ mode="markers",
213
+ name="Body envelope",
214
+ marker=dict(size=2.2, color=colors, colorscale="Blues", opacity=0.15, showscale=False),
215
+ text=texts,
216
+ hovertemplate="%{text}<extra></extra>",
217
+ )
218
+ )
219
+
220
+ if len(lesion_pts) > 0:
221
+ lesion_text = []
222
+ confidence = timepoint_meta.get("lesion_confidence", 0.35)
223
+ for z, y, x in lesion_pts:
224
+ region, note = _region_name(int(z), int(y), int(x), volume_u8.shape)
225
+ lesion_text.append(
226
+ f"<b>Candidate lesion ROI</b><br>Region: {region}<br>Confidence: {confidence:.2f}<br>{note}<br>Source: {timepoint_meta.get('lesion_source', 'heuristic') }"
227
+ )
228
+ fig.add_trace(
229
+ go.Scatter3d(
230
+ x=lesion_pts[:, 2],
231
+ y=lesion_pts[:, 1],
232
+ z=lesion_pts[:, 0],
233
+ mode="markers",
234
+ name="Candidate lesion",
235
+ marker=dict(size=3.8, color="#ef4444", opacity=0.78),
236
+ text=lesion_text,
237
+ hovertemplate="%{text}<extra></extra>",
238
+ )
239
+ )
240
+
241
+ fig.update_layout(
242
+ title=f"{timepoint_meta.get('label', 'Timepoint')} | 3D existing progression view",
243
+ template="plotly_white",
244
+ height=560,
245
+ margin=dict(l=0, r=0, t=48, b=0),
246
+ legend=dict(orientation="h", yanchor="bottom", y=1.02, x=0.0),
247
+ scene=dict(
248
+ xaxis=dict(title="Left ↔ Right"),
249
+ yaxis=dict(title="Posterior ↔ Anterior"),
250
+ zaxis=dict(title="Superior ↔ Inferior"),
251
+ aspectmode="data",
252
+ camera=dict(eye=dict(x=1.5, y=1.5, z=0.8)),
253
+ ),
254
+ )
255
+ return fig
256
+
257
+
258
+ def _case_markdown(case_manifest: dict[str, Any], package_mode: str) -> str:
259
+ source_text = "Preloaded TCGA-OV demo case" if package_mode == "demo" else "Uploaded imaging package"
260
+ return (
261
+ f"### {case_manifest.get('display_name', case_manifest.get('case_id', 'Case'))}\n"
262
+ f"- Source: {source_text}\n"
263
+ f"- Patient ID: `{case_manifest.get('patient_id', case_manifest.get('case_id', 'unknown'))}`\n"
264
+ f"- Timepoints loaded: {len(case_manifest.get('timepoints', []))}\n"
265
+ f"- Modality: {case_manifest.get('modality', 'CT')}\n"
266
+ f"- Note: `{case_manifest.get('summary_note', 'Existing progression viewer only; no predictive inference in this workspace.')}`"
267
+ )
268
+
269
+
270
+ def _package_root_from_upload(upload_path: str) -> Path:
271
+ upload_path = str(upload_path or "")
272
+ if not upload_path:
273
+ raise ValueError("No imaging package uploaded.")
274
+ src = Path(upload_path)
275
+ if not src.exists():
276
+ raise FileNotFoundError(f"Missing uploaded file: {src}")
277
+ target_dir = UPLOAD_CACHE_ROOT / src.stem
278
+ if target_dir.exists() and (target_dir / "manifest.json").exists():
279
+ return target_dir
280
+ target_dir.mkdir(parents=True, exist_ok=True)
281
+ with zipfile.ZipFile(src, "r") as zf:
282
+ zf.extractall(target_dir)
283
+ manifest_path = target_dir / "manifest.json"
284
+ if not manifest_path.exists():
285
+ raise ValueError("Uploaded package is missing manifest.json.")
286
+ manifest = _json_load(manifest_path)
287
+ if manifest.get("format") != PACKAGE_MAGIC:
288
+ raise ValueError("Uploaded package is not a supported RLDT ovarian imaging package.")
289
+ return target_dir
290
+
291
+
292
+ def available_demo_case_choices() -> list[tuple[str, str]]:
293
+ return [(case.get("display_name", case.get("case_id", "Case")), case.get("case_id", "")) for case in list_demo_cases()]
294
+
295
+
296
+ def initialize_demo_view() -> tuple[Any, ...]:
297
+ cases = list_demo_cases()
298
+ if not cases:
299
+ return (
300
+ gr.update(choices=[], value=None),
301
+ gr.update(choices=[], value=None),
302
+ "No demo imaging package found.",
303
+ None,
304
+ [],
305
+ None,
306
+ "No imaging asset loaded.",
307
+ gr.update(value=None, visible=False),
308
+ {},
309
+ [],
310
+ )
311
+ first_case = cases[0]["case_id"]
312
+ return load_imaging_case("demo", None, first_case, None)
313
+
314
+
315
+ def load_imaging_case(source_mode: str, upload_file: Any, selected_case_id: str | None, selected_timepoint_id: str | None):
316
+ if source_mode == "upload":
317
+ package_root = _package_root_from_upload(getattr(upload_file, "name", upload_file))
318
+ case_manifest = _json_load(package_root / "manifest.json")
319
+ package_mode = "upload"
320
+ else:
321
+ package_root = DEMO_ROOT / "cases" / (selected_case_id or "")
322
+ if not package_root.exists():
323
+ demo_cases = list_demo_cases()
324
+ if not demo_cases:
325
+ raise ValueError("No demo imaging cases are available.")
326
+ package_root = DEMO_ROOT / "cases" / demo_cases[0]["case_id"]
327
+ case_manifest = _load_case_manifest(package_root)
328
+ package_mode = "demo"
329
+
330
+ timepoints = case_manifest.get("timepoints", [])
331
+ if not timepoints:
332
+ raise ValueError("Imaging package contains no timepoints.")
333
+ timepoint_map = {tp["timepoint_id"]: tp for tp in timepoints}
334
+ if selected_timepoint_id not in timepoint_map:
335
+ selected_timepoint_id = timepoints[0]["timepoint_id"]
336
+ tp_meta = timepoint_map[selected_timepoint_id]
337
+ asset_path = package_root / tp_meta["asset_path"]
338
+ data = _load_asset(str(asset_path))
339
+ volume_u8 = data["volume_u8"]
340
+ lesion_mask = data.get("lesion_mask", np.zeros_like(volume_u8, dtype=np.uint8))
341
+ roi_center = data.get("roi_center_zyx", np.array([volume_u8.shape[0] // 2, volume_u8.shape[1] // 2, volume_u8.shape[2] // 2]))
342
+ center_z = int(roi_center[0])
343
+
344
+ fig = _viewer_plot(volume_u8, lesion_mask, tp_meta)
345
+ gallery_items, slice_indices = _build_gallery_items(volume_u8, lesion_mask, center_z)
346
+ detail_img, detail_md = _slice_detail(volume_u8, lesion_mask, slice_indices[len(slice_indices) // 2])
347
+ timeline = _timeline_figure(case_manifest)
348
+ state = {
349
+ "package_root": str(package_root),
350
+ "package_mode": package_mode,
351
+ "case_id": case_manifest.get("case_id"),
352
+ "timepoint_id": selected_timepoint_id,
353
+ }
354
+ case_choices = [(case_manifest.get("display_name", case_manifest.get("case_id", "Case")), case_manifest.get("case_id"))]
355
+ if package_mode == "demo":
356
+ case_choices = available_demo_case_choices()
357
+ tp_choices = [(tp.get("label", tp["timepoint_id"]), tp["timepoint_id"]) for tp in timepoints]
358
+ info_md = _case_markdown(case_manifest, package_mode)
359
+ return (
360
+ gr.update(choices=case_choices, value=case_manifest.get("case_id")),
361
+ gr.update(choices=tp_choices, value=selected_timepoint_id),
362
+ info_md,
363
+ fig,
364
+ gallery_items,
365
+ timeline,
366
+ detail_md,
367
+ gr.update(value=detail_img, visible=True),
368
+ state,
369
+ slice_indices,
370
+ )
371
+
372
+
373
+ def update_imaging_timepoint(state: dict[str, Any], selected_timepoint_id: str):
374
+ if not state:
375
+ raise ValueError("Load an imaging case before changing timepoints.")
376
+ package_root = Path(state["package_root"])
377
+ case_manifest = _load_case_manifest(package_root)
378
+ return load_imaging_case(state.get("package_mode", "demo"), None, case_manifest.get("case_id"), selected_timepoint_id)[2:]
379
+
380
+
381
+ def select_gallery_slice(evt: gr.SelectData, state: dict[str, Any], slice_indices: list[int]):
382
+ if not state or not slice_indices:
383
+ return None, "Load an imaging case first."
384
+ package_root = Path(state["package_root"])
385
+ case_manifest = _load_case_manifest(package_root)
386
+ tp_id = state["timepoint_id"]
387
+ tp_meta = next(tp for tp in case_manifest["timepoints"] if tp["timepoint_id"] == tp_id)
388
+ data = _load_asset(str(package_root / tp_meta["asset_path"]))
389
+ volume_u8 = data["volume_u8"]
390
+ lesion_mask = data.get("lesion_mask", np.zeros_like(volume_u8, dtype=np.uint8))
391
+ index = int(evt.index) if evt is not None else 0
392
+ index = max(0, min(index, len(slice_indices) - 1))
393
+ return _slice_detail(volume_u8, lesion_mask, slice_indices[index])
394
+
395
+
396
+ def imaging_upload_requirements_md() -> str:
397
+ return (
398
+ "#### Imaging package format\n"
399
+ "- Upload a `.zip` package generated by the RLDT ovarian imaging exporter.\n"
400
+ "- Each package contains one case, multiple timepoints, and preprocessed `.npz` imaging assets.\n"
401
+ "- Optional lesion masks are supported. If missing, the viewer falls back to a heuristic pelvic lesion candidate.\n"
402
+ "- This workspace is for **existing progression visualization only**; it does not run predictive modeling."
403
+ )
tools/build_tcga_ov_imaging_demo.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import base64
5
+ import json
6
+ import shutil
7
+ import zlib
8
+ from pathlib import Path
9
+
10
+ import numpy as np
11
+ import pandas as pd
12
+ import SimpleITK as sitk
13
+ from PIL import Image, ImageDraw
14
+ from scipy import ndimage as ndi
15
+
16
# Series-selection CSV produced by the TCGA-OV preprocessing pipeline (machine-specific path).
RAW_SEL = Path('/home/xqin5/3DTreatment/data/processed/tcga_ov/tcga_ov_series_selection.csv')
# Output root for the in-Space demo imaging cases.
OUT_ROOT = Path('/home/xqin5/RLDTALL/online/DT_hf_space/RL0910/demo_imaging_cases')
# Output root for partner-facing zip packages.
PARTNER_OUT = Path('/home/xqin5/RLDTALL/online/ovarian_imaging_partner_test_data')
# Format tag written into every manifest so the viewer can validate uploads.
PACKAGE_MAGIC = 'rldt_ov_imaging_package'
# Patients included in the demo build.
DEMO_PATIENTS = ['TCGA-25-1633', 'TCGA-25-1328', 'TCGA-25-2393']
# At most this many timepoints are kept per case (evenly sampled over time).
MAX_TIMEPOINTS = 4
# Common resampled volume shape for all timepoints.
TARGET_SHAPE = (80, 160, 160)  # z, y, x
23
+
24
+
25
def read_series(series_dir: Path) -> tuple[np.ndarray, tuple[float, float, float]]:
    """Load the first DICOM series found under *series_dir*.

    Returns the volume as an int16 array in (z, y, x) order together with the
    voxel spacing reordered to (z, y, x) millimetres (SimpleITK reports
    spacing as (x, y, z)).

    Raises:
        FileNotFoundError: if the directory contains no DICOM series.
    """
    reader = sitk.ImageSeriesReader()
    series_ids = reader.GetGDCMSeriesIDs(str(series_dir))
    if not series_ids:
        raise FileNotFoundError(f'No DICOM series in {series_dir}')
    file_names = reader.GetGDCMSeriesFileNames(str(series_dir), series_ids[0])
    reader.SetFileNames(file_names)
    image = reader.Execute()
    volume = sitk.GetArrayFromImage(image).astype(np.int16)
    sx, sy, sz = (float(v) for v in image.GetSpacing())  # (x, y, z) from SimpleITK
    return volume, (sz, sy, sx)
36
+
37
+
38
def resample_nn(arr: np.ndarray, out_shape: tuple[int, int, int], order: int) -> np.ndarray:
    """Resample *arr* to *out_shape* with spline interpolation of the given *order*.

    order=0 is nearest-neighbour (suitable for masks); order=1 is trilinear.
    """
    factors = tuple(target / current for target, current in zip(out_shape, arr.shape))
    return ndi.zoom(arr, zoom=factors, order=order)
41
+
42
+
43
def build_body_mask(volume_hu: np.ndarray) -> np.ndarray:
    """Return a boolean mask of the largest connected above-air region.

    Thresholds at -350 HU to drop air/background, then keeps only the biggest
    connected component (the patient body). If labeling finds no components,
    the raw threshold mask is returned unchanged.
    """
    above_air = volume_hu > -350
    labeled, num_components = ndi.label(above_air)
    if num_components <= 0:
        return above_air
    component_sizes = np.bincount(labeled.ravel())
    component_sizes[0] = 0  # never pick the background label
    largest_label = int(component_sizes.argmax())
    return labeled == largest_label
51
+
52
+
53
def heuristic_lesion_mask(volume_hu: np.ndarray, body_mask: np.ndarray) -> tuple[np.ndarray, np.ndarray, float, str]:
    """Pick a heuristic pelvic lesion candidate when no real label exists.

    Returns (lesion_mask_u8, center_zyx_int16, confidence, source_tag). Three
    outcomes, in decreasing confidence:
      * 'heuristic_component' (0.42): best soft-tissue connected component
        inside a central pelvic ROI.
      * 'heuristic_ellipsoid' (0.22): fixed-size ellipsoid at the ROI center
        when no component passes the size filter.
      * 'fallback_box' (0.1): fixed box at the volume center when the body
        mask is empty.
    """
    # Axial slices that contain any body voxels.
    z_idx = np.where(body_mask.any(axis=(1, 2)))[0]
    if len(z_idx) == 0:
        # No body at all: place a fixed box at the volume center.
        center = np.array([volume_hu.shape[0] // 2, volume_hu.shape[1] // 2, volume_hu.shape[2] // 2], dtype=np.int16)
        lesion = np.zeros_like(body_mask, dtype=np.uint8)
        lesion[max(center[0]-3,0):center[0]+4, max(center[1]-10,0):center[1]+11, max(center[2]-10,0):center[2]+11] = 1
        return lesion, center, 0.1, 'fallback_box'
    # Lower-abdomen band: 60th-90th percentile of body-bearing slices
    # (presumably approximates the pelvic region -- TODO confirm on data).
    z0 = int(np.percentile(z_idx, 60))
    z1 = int(np.percentile(z_idx, 90))
    # In-plane body centroid from the max-projection over that band.
    proj = body_mask[z0:z1].max(axis=0)
    yy, xx = np.where(proj)
    cy = float(np.mean(yy)) if len(yy) else volume_hu.shape[1] / 2
    cx = float(np.mean(xx)) if len(xx) else volume_hu.shape[2] / 2
    H, W = volume_hu.shape[1:]
    Y, X = np.ogrid[:H, :W]
    # Central elliptical ROI (~18% height, ~16% width radii) around the centroid.
    central = ((Y - cy) ** 2 / (0.18 * H) ** 2 + (X - cx) ** 2 / (0.16 * W) ** 2) <= 1.0
    roi = np.zeros_like(body_mask, dtype=bool)
    roi[z0:z1] = central
    # Candidate voxels: 145-280 HU band restricted to body and ROI,
    # cleaned up with in-plane opening (denoise) then closing (fill gaps).
    candidate = (volume_hu > 145) & (volume_hu < 280) & body_mask & roi
    candidate = ndi.binary_opening(candidate, structure=np.ones((1, 3, 3)))
    candidate = ndi.binary_closing(candidate, structure=np.ones((1, 5, 5)))
    labels, n = ndi.label(candidate)
    best_mask = None
    best_score = -1.0
    # Default center: middle of the band at the in-plane centroid.
    center = np.array([int((z0 + z1) / 2), int(cy), int(cx)], dtype=np.int16)
    if n > 0:
        coords_center = center.astype(float)
        for idx in range(1, n + 1):
            comp = labels == idx
            vox = int(comp.sum())
            if vox < 200:
                continue  # ignore tiny components
            pts = np.argwhere(comp)
            centroid = pts.mean(axis=0)
            # Anisotropic distance to the ROI center (z weighted more tightly).
            dist = float(np.linalg.norm((centroid - coords_center) / np.array([8.0, 18.0, 18.0])))
            # Favor large components close to the ROI center.
            score = vox / (1.0 + dist * 10.0)
            if score > best_score:
                best_score = score
                best_mask = comp
                center = centroid.astype(np.int16)
    if best_mask is None:
        # No component survived: draw a fixed-size ellipsoid at the ROI center,
        # clipped to the body mask.
        best_mask = np.zeros_like(body_mask, dtype=bool)
        cz, cyi, cxi = [int(v) for v in center]
        rz, ry, rx = 4, 12, 12
        Z, Yg, Xg = np.ogrid[:volume_hu.shape[0], :volume_hu.shape[1], :volume_hu.shape[2]]
        ell = ((Z-cz)**2/rz**2 + (Yg-cyi)**2/ry**2 + (Xg-cxi)**2/rx**2) <= 1.0
        best_mask = ell & body_mask
        confidence = 0.22
        source = 'heuristic_ellipsoid'
    else:
        confidence = 0.42
        source = 'heuristic_component'
    return best_mask.astype(np.uint8), center.astype(np.int16), confidence, source
106
+
107
+
108
def normalize_to_u8(volume_hu: np.ndarray) -> np.ndarray:
    """Map HU values to uint8 through a soft-tissue window of [-150, 250] HU."""
    clipped = np.clip(volume_hu, -150, 250)
    return (((clipped + 150.0) / 400.0) * 255.0).astype(np.uint8)
112
+
113
+
114
+ def sample_timepoints(df: pd.DataFrame) -> pd.DataFrame:
115
+ df = df.sort_values(['relative_time', 'Number of Images'], ascending=[True, False]).drop_duplicates('timepoint')
116
+ if len(df) <= MAX_TIMEPOINTS:
117
+ return df
118
+ idxs = np.linspace(0, len(df) - 1, MAX_TIMEPOINTS, dtype=int)
119
+ return df.iloc[idxs]
120
+
121
+
122
def build_case(case_df: pd.DataFrame, case_root: Path) -> dict:
    """Build one case package: per-timepoint JSON assets plus a manifest.json.

    For each row of *case_df* (one CT series per timepoint) the series is
    read, resampled to TARGET_SHAPE, windowed to uint8, and paired with a
    heuristic lesion mask. Volumes/masks are stored zlib-compressed and
    base85-encoded inside per-timepoint JSON files under *case_root*.

    Returns the manifest dict (also written to case_root/manifest.json).
    """
    case_root.mkdir(parents=True, exist_ok=True)
    manifest = {
        'format': PACKAGE_MAGIC,
        'version': 1,
        'case_id': case_df.iloc[0]['patient_id'],
        'patient_id': case_df.iloc[0]['patient_id'],
        'display_name': f"TCGA-OV {case_df.iloc[0]['patient_id']}",
        'modality': 'CT',
        'summary_note': 'Existing progression visualization built from preprocessed TCGA-OV CT series. Lesion highlighting is heuristic when no label is available.',
        'timepoints': [],
    }
    # Timepoint indices are 1-based to match the 'tp_01.json' naming scheme.
    for tp_idx, (_, row) in enumerate(case_df.iterrows(), start=1):
        volume_hu, spacing_zyx = read_series(Path(row['image_path']))
        body = build_body_mask(volume_hu)
        # Intensity volume: trilinear; body mask: nearest-neighbour to stay binary.
        volume_hu_small = resample_nn(volume_hu, TARGET_SHAPE, order=1).astype(np.int16)
        body_small = resample_nn(body.astype(np.uint8), TARGET_SHAPE, order=0) > 0
        lesion_mask, roi_center, confidence, source = heuristic_lesion_mask(volume_hu_small, body_small)
        volume_u8 = normalize_to_u8(volume_hu_small)
        asset_name = f'tp_{tp_idx:02d}.json'
        # Raw voxel bytes are zlib(level 9)-compressed then base85-encoded so
        # they fit in a plain JSON document.
        payload = {
            'shape': list(volume_u8.shape),
            'roi_center_zyx': [int(v) for v in roi_center.tolist()],
            'volume_u8_b85': base64.b85encode(zlib.compress(volume_u8.tobytes(), level=9)).decode('ascii'),
            'lesion_mask_b85': base64.b85encode(zlib.compress(lesion_mask.astype(np.uint8).tobytes(), level=9)).decode('ascii'),
        }
        (case_root / asset_name).write_text(json.dumps(payload), encoding='utf-8')
        manifest['timepoints'].append({
            'timepoint_id': f't{tp_idx:02d}',
            'label': str(row['timepoint']),
            'relative_time': float(row['relative_time']),
            'asset_path': asset_name,
            'series_description': str(row['Series Description']),
            'num_slices_original': int(row['Number of Images']),
            'spacing_zyx_mm': [float(v) for v in spacing_zyx],
            'lesion_confidence': float(confidence),
            'lesion_source': source,
            'lesion_voxel_count': int(lesion_mask.sum()),
        })
    (case_root / 'manifest.json').write_text(json.dumps(manifest, indent=2), encoding='utf-8')
    return manifest
163
+
164
+
165
def main() -> None:
    """Build all demo imaging cases plus partner-facing zip packages.

    For each patient in DEMO_PATIENTS: sample timepoints from the
    series-selection CSV, rebuild the case directory from scratch, record it
    in the root manifest, and archive it as an upload package. Finally writes
    the root manifest and a partner README.
    """
    OUT_ROOT.mkdir(parents=True, exist_ok=True)
    (OUT_ROOT / 'cases').mkdir(parents=True, exist_ok=True)
    PARTNER_OUT.mkdir(parents=True, exist_ok=True)
    sel = pd.read_csv(RAW_SEL)
    root_manifest = {'format': PACKAGE_MAGIC, 'version': 1, 'cases': []}
    for pid in DEMO_PATIENTS:
        case_df = sample_timepoints(sel[sel['patient_id'] == pid])
        case_root = OUT_ROOT / 'cases' / pid
        if case_root.exists():
            shutil.rmtree(case_root)  # always rebuild from scratch
        case_manifest = build_case(case_df, case_root)
        root_manifest['cases'].append({
            'case_id': pid,
            'display_name': case_manifest['display_name'],
            'timepoint_count': len(case_manifest['timepoints']),
        })
        # Partner package: one zip per case, replacing any stale archive.
        partner_zip = PARTNER_OUT / f'{pid}_ovarian_imaging_package.zip'
        if partner_zip.exists():
            partner_zip.unlink()
        shutil.make_archive(str(partner_zip.with_suffix('')), 'zip', root_dir=case_root)
    (OUT_ROOT / 'manifest.json').write_text(json.dumps(root_manifest, indent=2), encoding='utf-8')
    readme = PARTNER_OUT / 'README.txt'
    readme.write_text(
        'RLDT ovarian imaging upload package\n\n'
        '- Upload one .zip package at a time in the Space imaging viewer.\n'
        '- Package format is preprocessed multi-timepoint CT for visualization only.\n'
        '- If no lesion mask exists, the viewer uses a heuristic pelvic lesion candidate overlay.\n',
        encoding='utf-8',
    )
197
+
198
+
199
# Script entry point: build demo cases when run directly, not on import.
if __name__ == '__main__':
    main()