Mattias Cosarinsky commited on
Commit
39f341b
·
1 Parent(s): 9ad702e
Files changed (2) hide show
  1. app.py +104 -52
  2. utils.py +94 -30
app.py CHANGED
@@ -1,82 +1,134 @@
1
  # app.py
2
 
3
  import gradio as gr
4
- from utils import HF_REPO, ROIS, load_roi_image, load_images, get_hypotheses_for_selection
5
 
6
- # -------- CONFIGURATION --------
7
  MODELS = ["SAE", "SAE_ICA", "ICA", "PCA", "NMF", "Voxel"]
8
 
9
- # Initial values derived from the helper functions after the structure is loaded
10
  INITIAL_HYPOTHESES = get_hypotheses_for_selection(MODELS[0], ROIS[0])
11
- INITIAL_ROI_IMAGE = load_roi_image(ROIS[0])
 
 
 
 
12
 
13
- # -------- GRADIO UI AND CALLBACK LOGIC --------
 
14
  def update_hypotheses(model, roi):
15
  new_hypotheses = get_hypotheses_for_selection(model, roi)
16
  return gr.Dropdown(choices=new_hypotheses, value=new_hypotheses[0] if new_hypotheses else None)
17
 
18
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  gr.Markdown("## BrainExplore: Visual Concept Explorer")
20
 
21
- # 1. TOP ROW: Dropdowns (Menus)
22
  with gr.Row():
23
- model_dropdown = gr.Dropdown(label="Select Model", choices=MODELS, value=MODELS[0], scale=1)
24
- roi_dropdown = gr.Dropdown(label="Select ROI", choices=ROIS, value=ROIS[0], scale=1)
25
-
 
 
 
26
  hyp_dropdown = gr.Dropdown(
27
- label="Select Hypothesis",
28
  choices=INITIAL_HYPOTHESES,
29
- value=INITIAL_HYPOTHESES[0] if INITIAL_HYPOTHESES else None,
30
- scale=1
31
  )
32
 
33
- # 2. SECOND ROW: Visualizations (ROI Image and Gallery)
34
  with gr.Row(equal_height=True):
35
-
36
- roi_image = gr.Image(
37
- label="Selected Brain Region (ROI)",
38
- value=INITIAL_ROI_IMAGE,
39
- interactive=False,
40
- height=600,
41
- scale=1
42
- )
43
-
44
- image_gallery = gr.Gallery(
45
- label="Top Activating Images",
46
- columns=None, rows=1, scale=3,
47
- object_fit="contain", preview=True, height=600
48
- )
49
 
50
- # -------- CALLBACKS --------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  model_dropdown.change(
52
- fn=update_hypotheses,
53
- inputs=[model_dropdown, roi_dropdown],
54
- outputs=[hyp_dropdown],
55
- queue=False
56
- ).then(
57
- fn=load_images,
58
- inputs=[model_dropdown, roi_dropdown, hyp_dropdown],
59
- outputs=[image_gallery]
60
  )
61
 
62
- # ROI Change: Update Hypotheses, THEN update ROI Image, THEN update Activating Images
63
  roi_dropdown.change(
64
- fn=update_hypotheses,
65
- inputs=[model_dropdown, roi_dropdown],
66
- outputs=[hyp_dropdown],
67
- queue=False
68
  ).then(
69
- fn=load_roi_image,
70
- inputs=[roi_dropdown],
71
- outputs=[roi_image]
72
- ).then(
73
- fn=load_images,
74
- inputs=[model_dropdown, roi_dropdown, hyp_dropdown],
75
- outputs=[image_gallery]
 
 
 
 
 
 
 
 
 
 
 
76
  )
77
 
78
- # Hypothesis Change: Update Activating Images only
79
- hyp_dropdown.change(fn=load_images, inputs=[model_dropdown, roi_dropdown, hyp_dropdown], outputs=[image_gallery])
80
 
81
  if __name__ == "__main__":
82
- demo.launch()
 
1
  # app.py
2
 
3
  import gradio as gr
4
+ from utils import BEST_ACROSS, ROIS, load_roi_image, get_hypotheses_for_selection, load_images_and_views, load_roi_for_selection
5
 
 
6
  MODELS = ["SAE", "SAE_ICA", "ICA", "PCA", "NMF", "Voxel"]
7
 
 
8
  INITIAL_HYPOTHESES = get_hypotheses_for_selection(MODELS[0], ROIS[0])
9
+ INITIAL_FID_IMAGES, INITIAL_VIEW_IMAGES = load_images_and_views(
10
+ MODELS[0], ROIS[0], INITIAL_HYPOTHESES[0]
11
+ )
12
+ INITIAL_VIEW_IMAGE = None
13
+ INITIAL_ROI_IMAGE = None
14
 
15
+
16
+ # -------- CALLBACKS --------
17
def update_hypotheses(model, roi):
    """Refresh the hypothesis dropdown for the current (model, roi) pair.

    Returns a gr.Dropdown update whose choices are the hypotheses for the
    selection; the first one is preselected, or None when the list is empty.
    """
    options = get_hypotheses_for_selection(model, roi)
    selected = options[0] if options else None
    return gr.Dropdown(choices=options, value=selected)
20
 
21
+
22
def update_activating_images(model, roi, hyp):
    """Reload gallery images, brain-view state, initial view and ROI image.

    Returns (fid_images, brain_images, initial_view, roi_img) matching the
    outputs [image_gallery, brain_state, view_image, roi_image].
    """
    fid_imgs, brain_imgs = load_images_and_views(model, roi, hyp)

    # "Across" is a synthetic ROI with no picture of its own: resolve the
    # concrete ROI recorded in BEST_ACROSS for this model/hypothesis.
    if roi.lower() == "across":
        resolved_roi = BEST_ACROSS.get(model, {}).get(hyp, {}).get("roi")
    else:
        resolved_roi = roi
    roi_img = load_roi_image(roi_name=resolved_roi)

    # Preselect the first brain view so the panel is never stale.
    first_view = brain_imgs[0] if brain_imgs else None
    return fid_imgs, brain_imgs, first_view, roi_img
35
+
36
+
37
+
38
def update_view_on_click(evt: gr.SelectData, brain_images):
    """Show the brain view matching the gallery item the user clicked.

    Gradio injects ``evt`` because of the ``gr.SelectData`` annotation;
    ``evt.index`` is the clicked gallery position. ``brain_images`` is the
    cached list held in gr.State. Returns the matching image or None.
    """
    idx = evt.index
    # Bounds check: the gallery and the brain-view list are built separately
    # (load_images_and_views), so their lengths are not guaranteed equal and
    # an unguarded brain_images[idx] could raise IndexError.
    if brain_images and idx is not None and 0 <= idx < len(brain_images):
        return brain_images[idx]
    return None
43
+
44
+
45
+ # -------- UI --------
46
# -------- UI --------
with gr.Blocks(fill_width=True) as demo:

    # Per-session cache of brain-view images, consumed by the gallery's
    # select callback to map a clicked gallery index to its brain view.
    brain_state = gr.State(value=INITIAL_VIEW_IMAGES)

    gr.Markdown("## BrainExplore: Visual Concept Explorer")

    # Top row: the three cascading selectors (model -> ROI -> hypothesis).
    with gr.Row():
        model_dropdown = gr.Dropdown(
            label="Select Model", choices=MODELS, value=MODELS[0], scale=1
        )
        roi_dropdown = gr.Dropdown(
            label="Select ROI", choices=ROIS, value=ROIS[0], scale=1
        )
        hyp_dropdown = gr.Dropdown(
            label="Select Hypothesis",
            choices=INITIAL_HYPOTHESES,
            value=INITIAL_HYPOTHESES[0],
            scale=1,
        )

    with gr.Row(equal_height=True):

        # LEFT column: ROI image stacked above the viewing-angle image.
        with gr.Column(scale=1):
            roi_image = gr.Image(
                label="Selected Brain Region (ROI)",
                value=None,  # empty initially; filled by the change callbacks
                interactive=False,
                height=300,
            )

            view_image = gr.Image(
                label="Viewing Angle",
                value=None,  # empty initially; filled on change/gallery click
                interactive=False,
                height=300,
            )

        # RIGHT column: gallery of top activating images.
        with gr.Column(scale=3):
            image_gallery = gr.Gallery(
                label="Top Activating Images",
                columns=None,
                rows=1,
                object_fit="contain",
                preview=True,
                height=600,
                allow_preview=True,
                elem_id="gallery"
            )


    # -------- CALLBACK WIRING --------

    # Model/ROI changes first refresh the hypothesis list so the follow-up
    # image reload (registered below) sees a valid hypothesis value.
    model_dropdown.change(
        fn=update_hypotheses,
        inputs=[model_dropdown, roi_dropdown],
        outputs=[hyp_dropdown],
    )

    roi_dropdown.change(
        fn=update_hypotheses,
        inputs=[model_dropdown, roi_dropdown],
        outputs=[hyp_dropdown],
    ).then(
        fn=lambda model, roi, hyp: load_roi_for_selection(model, roi, hyp),
        inputs=[model_dropdown, roi_dropdown, hyp_dropdown],
        outputs=[roi_image],
    )

    # Any of the three selectors reloads the gallery, the cached brain views,
    # the initial view image and the ROI image.
    # NOTE(review): for roi_dropdown this makes roi_image update twice (once
    # via the .then above, once here), and these listeners may fire before the
    # hypothesis refresh lands, passing a stale hyp — confirm intended.
    for cb in [model_dropdown, roi_dropdown, hyp_dropdown]:
        cb.change(
            fn=update_activating_images,
            inputs=[model_dropdown, roi_dropdown, hyp_dropdown],
            outputs=[image_gallery, brain_state, view_image, roi_image],
        )

    # Clicking a gallery item swaps in the matching brain view.
    image_gallery.select(
        fn=update_view_on_click,
        inputs=[brain_state],
        outputs=[view_image],
    )
131
 
 
 
132
 
133
# Launch the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
utils.py CHANGED
@@ -2,13 +2,31 @@ import json
2
  from datasets import load_dataset
3
  from PIL import Image
4
  from huggingface_hub import hf_hub_download
 
5
  import os
6
 
7
  # -------- CONFIG & STATE --------
8
  HF_REPO = "mcosarinsky/BrainExplore-Images"
9
  STRUCTURE_DATA = None
10
- ROIS = ["EBA","FBA-1","FBA-2","FFA-1","FFA-2","OFA","OPA","OWFA","PPA","RSC","VWFA-1","VWFA-2","hV4"]
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  # -------- 1. JSON STRUCTURE HELPERS --------
13
  def load_structure(repo_id, filename="structure.json"):
14
  """
@@ -36,18 +54,19 @@ def load_structure(repo_id, filename="structure.json"):
36
 
37
  def get_hypotheses_for_selection(model, roi):
38
  """
39
- Retrieves the list of hypotheses based on the selected model and ROI
40
- from the pre-loaded STRUCTURE_DATA.
41
  """
42
- if not STRUCTURE_DATA:
43
- return []
44
-
45
- try:
46
- hypotheses = STRUCTURE_DATA.get(model, {}).get(roi, [])
47
- return sorted(hypotheses)
48
- except Exception as e:
49
- print(f"Error accessing structure data for {model}/{roi}: {e}")
50
  return []
 
 
 
 
 
51
 
52
  # -------- 2. IMAGE PROCESSING HELPERS --------
53
  def pad_image(pil_img, padding_x=10, padding_y=0, color=(255, 255, 255)):
@@ -61,28 +80,63 @@ def pad_image(pil_img, padding_x=10, padding_y=0, color=(255, 255, 255)):
61
  new_img.paste(pil_img, (padding_x, padding_y))
62
  return new_img
63
 
64
-
65
- # -------- 3. DATA LOADING HELPERS --------
66
- def load_images(model, roi, hyp):
67
  """
68
- Loads top activating images for the given parameters and applies padding.
 
69
  """
70
- folder_path = f"{model}/{roi}/{hyp}"
71
- ds = load_dataset(HF_REPO, data_dir=folder_path, split="train", streaming=True)
72
- image_paths = [img["image"] for img in ds.take(2)]
73
- padded_images = [pad_image(img, padding_x=10, padding_y=0) for img in image_paths]
74
- return padded_images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
- def load_roi_image(roi_name):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  """
78
- Downloads the ROI PNG from the Hugging Face Hub, looking inside the 'ROI/' folder,
79
- and loads it as a PIL Image.
 
80
  """
81
  if not roi_name:
82
  return None
83
-
84
- file_name = f"ROI/{roi_name}.png"
85
-
 
 
 
86
  try:
87
  local_path = hf_hub_download(
88
  repo_id=HF_REPO,
@@ -91,9 +145,19 @@ def load_roi_image(roi_name):
91
  )
92
  return Image.open(local_path)
93
  except Exception as e:
94
- # A 404 error here likely means the file doesn't exist under that path
95
- print(f"Error loading ROI image {roi_name} via hf_hub_download. Path used: {file_name}. Details: {e}")
96
  return None
 
 
 
 
 
 
 
 
 
 
 
97
 
98
- # Perform initial structure load for dynamic dependency resolution in app.py
99
- load_structure(HF_REPO)
 
2
  from datasets import load_dataset
3
  from PIL import Image
4
  from huggingface_hub import hf_hub_download
5
+ from pathlib import Path
6
  import os
7
 
8
# -------- CONFIG & STATE --------
# Hugging Face dataset repo that holds all images and JSON metadata.
HF_REPO = "mcosarinsky/BrainExplore-Images"
# Parsed structure.json; populated by load_structure() at import time.
STRUCTURE_DATA = None
# "Across" is a synthetic ROI resolved through best_across.json rather than
# structure.json; the rest are concrete brain regions.
ROIS = ["Across","EBA","FBA-1","FBA-2","FFA-1","FFA-2","OFA","OPA","OWFA","PPA","RSC","VWFA-1","VWFA-2","hV4"]

# Cache for best_across.json; filled lazily by load_best_across().
BEST_ACROSS = None
15
+
16
def load_best_across():
    """Download and cache best_across.json from the Hub.

    The parsed dict is memoized in the module-level BEST_ACROSS; on any
    failure an empty dict is cached so callers never see None afterwards.
    """
    global BEST_ACROSS
    if BEST_ACROSS is None:
        try:
            local = hf_hub_download(repo_id=HF_REPO, filename="best_across.json", repo_type="dataset")
            with open(local, "r") as fh:
                BEST_ACROSS = json.load(fh)
        except Exception as e:
            print(f"Error loading best_across.json: {e}")
            BEST_ACROSS = {}
    return BEST_ACROSS
29
+
30
  # -------- 1. JSON STRUCTURE HELPERS --------
31
  def load_structure(repo_id, filename="structure.json"):
32
  """
 
54
 
55
def get_hypotheses_for_selection(model, roi):
    """Return the sorted hypothesis names for a (model, roi) pair.

    Regular ROIs are looked up in STRUCTURE_DATA (structure.json); the
    synthetic "Across" ROI is looked up in BEST_ACROSS (best_across.json).
    Returns [] when the backing data is missing or the pair is unknown.
    """
    if roi.lower() == "across":
        # BEST_ACROSS may still be None if load_best_across() failed or has
        # not run; the unguarded `model in BEST_ACROSS` would raise TypeError.
        if BEST_ACROSS and model in BEST_ACROSS:
            return sorted(BEST_ACROSS[model].keys())
        return []
    if not STRUCTURE_DATA:
        return []
    return sorted(STRUCTURE_DATA.get(model, {}).get(roi, []))
70
 
71
  # -------- 2. IMAGE PROCESSING HELPERS --------
72
  def pad_image(pil_img, padding_x=10, padding_y=0, color=(255, 255, 255)):
 
80
  new_img.paste(pil_img, (padding_x, padding_y))
81
  return new_img
82
 
83
def load_images_and_views(model, roi, hyp):
    """Load (fid_images, brain_images) for a model/ROI/hypothesis selection.

    Regular ROI: stream the folder "{model}/{roi}/{hyp}" and split files by
    name — "fid" images get white padding for the gallery, "brain" images are
    kept raw for the view panel.
    "Across" ROI: use best_across.json to pick a single fid/brain pair from
    the concrete ROI it records for this model/hypothesis.

    Returns two lists (possibly empty) of PIL images.
    """
    if roi.lower() == "across":
        model_dict = BEST_ACROSS.get(model, {})
        hyp_entry = model_dict.get(hyp)
        if not hyp_entry:
            # Unknown hypothesis for this model in best_across.json.
            return [], []

        # best_across.json names the real ROI folder plus the exact files.
        roi_name = hyp_entry["roi"]
        fid_file = hyp_entry["fid_file"]
        brain_file = hyp_entry.get("brain_file")

        # NOTE(review): non-streaming load pulls the whole folder before the
        # scan; relies on PIL's .filename being set by `datasets` — confirm.
        ds = load_dataset(HF_REPO, data_dir=f"{model}/{roi_name}/{hyp}", split="train")
        fid_image = None
        brain_image = None

        # Match by bare filename against the entries from best_across.json.
        for item in ds:
            img = item["image"]
            img_name = Path(img.filename).name
            if img_name == fid_file:
                fid_image = pad_image(img, padding_x=10, padding_y=0)
            elif brain_file and img_name == brain_file:
                brain_image = img

        return [fid_image] if fid_image else [], [brain_image] if brain_image else []

    else:
        # normal loading: classify every file in the folder by its name.
        folder_path = f"{model}/{roi}/{hyp}"
        ds = load_dataset(HF_REPO, data_dir=folder_path, split="train")
        fid_images, brain_images = [], []
        for item in ds:
            img = item["image"]
            img_name = Path(img.filename).name
            if "fid" in img_name.lower():
                fid_images.append(pad_image(img, padding_x=10, padding_y=0))
            elif "brain" in img_name.lower():
                brain_images.append(img)
        return fid_images, brain_images
125
+
126
+ def load_roi_image(roi_name, override_roi=None):
127
  """
128
+ Downloads the ROI PNG from the Hugging Face Hub and loads it as a PIL Image.
129
+
130
+ If roi_name is 'Across' and override_roi is provided, it loads that ROI instead.
131
  """
132
  if not roi_name:
133
  return None
134
+
135
+ # Use override if provided (for Across)
136
+ actual_roi = override_roi if override_roi else roi_name
137
+
138
+ file_name = f"ROI/{actual_roi}.png"
139
+
140
  try:
141
  local_path = hf_hub_download(
142
  repo_id=HF_REPO,
 
145
  )
146
  return Image.open(local_path)
147
  except Exception as e:
148
+ print(f"Error loading ROI image {actual_roi} via hf_hub_download. Path used: {file_name}. Details: {e}")
 
149
  return None
150
+
151
def load_roi_for_selection(model, roi, hyp):
    """Load the ROI image for the current selection.

    The synthetic "Across" ROI has no image of its own, so the concrete ROI
    recorded in BEST_ACROSS for (model, hyp) is used instead. Returns a PIL
    image or None.
    """
    override_roi = None
    if roi.lower() == "across":
        # BEST_ACROSS can be None if load_best_across() failed; the unguarded
        # BEST_ACROSS.get(...) would raise AttributeError.
        override_roi = (BEST_ACROSS or {}).get(model, {}).get(hyp, {}).get("roi")
    return load_roi_image(roi_name=roi, override_roi=override_roi)
160
+
161
 
162
+ load_structure(HF_REPO)
163
+ load_best_across()