Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .gitattributes +3 -0
- .gradio/certificate.pem +31 -0
- Final_similarity_matrix.csv +3 -0
- Final_similarity_matrix2.csv +3 -0
- README.md +3 -9
- app.py +323 -0
- brain/__pycache__/roi_analyzer.cpython-311.pyc +0 -0
- brain/__pycache__/roi_analyzer.cpython-39.pyc +0 -0
- brain/roi_analyzer.py +111 -0
- config/__pycache__/roi_config.cpython-311.pyc +0 -0
- config/__pycache__/roi_config.cpython-39.pyc +0 -0
- config/roi_config.py +44 -0
- data/__pycache__/data_loader.cpython-311.pyc +0 -0
- data/__pycache__/data_loader.cpython-39.pyc +0 -0
- data/data_loader.py +112 -0
- gui/__pycache__/gradio_interface.cpython-311.pyc +0 -0
- gui/__pycache__/gradio_interface.cpython-39.pyc +0 -0
- gui/gradio_interface.py +189 -0
- overall_database.csv +3 -0
- requirements.txt +465 -0
- test.py +722 -0
- visualization/__pycache__/image_viewer.cpython-311.pyc +0 -0
- visualization/__pycache__/image_viewer.cpython-39.pyc +0 -0
- visualization/__pycache__/plot_generator.cpython-311.pyc +0 -0
- visualization/__pycache__/plot_generator.cpython-39.pyc +0 -0
- visualization/image_viewer.py +39 -0
- visualization/plot_generator.py +189 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
Final_similarity_matrix.csv filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
Final_similarity_matrix2.csv filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
overall_database.csv filter=lfs diff=lfs merge=lfs -text
|
.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
Final_similarity_matrix.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5ff83586ea8a37e91968eb0b73f23edebd61fdd88f815bef34a0f727c6d5ef35
|
| 3 |
+
size 39820273
|
Final_similarity_matrix2.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0cdd2d60df4ebd8de7533d82f7df5d4f8733d06134c641795d2f014c5de561b2
|
| 3 |
+
size 64812304
|
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji: 🔥
|
| 4 |
-
colorFrom: green
|
| 5 |
-
colorTo: pink
|
| 6 |
-
sdk: gradio
|
| 7 |
-
sdk_version: 5.46.0
|
| 8 |
app_file: app.py
|
| 9 |
-
|
|
|
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: similarity_analysis
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
app_file: app.py
|
| 4 |
+
sdk: gradio
|
| 5 |
+
sdk_version: 5.45.0
|
| 6 |
---
|
|
|
|
|
|
app.py
ADDED
|
@@ -0,0 +1,323 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==================== main.py ====================
|
| 2 |
+
"""Main application orchestrator"""
|
| 3 |
+
|
| 4 |
+
import sys
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
# Add project root to path for imports
|
| 8 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 9 |
+
|
| 10 |
+
from data.data_loader import DataLoader
|
| 11 |
+
from brain.roi_analyzer import ROIAnalyzer
|
| 12 |
+
from visualization.plot_generator import PlotGenerator
|
| 13 |
+
from visualization.image_viewer import ImageViewer
|
| 14 |
+
from gui.gradio_interface import GradioInterface
|
| 15 |
+
from typing import Tuple, Optional, Union, Dict, Any
|
| 16 |
+
import pandas as pd
|
| 17 |
+
|
| 18 |
+
class SimilarityApp:
    """Main application class that orchestrates all components.

    Wires together the data loader, plot generator, image viewer, ROI
    analyzer and the Gradio GUI, and exposes the callback methods that the
    GUI binds to its controls.
    """

    def __init__(self, csv_path: str):
        """Build all sub-components and load the similarity database.

        Args:
            csv_path: Path to the overall similarity CSV database.

        Raises:
            ValueError: If the CSV could not be loaded.
        """
        # Initialize all components
        self.data_loader = DataLoader()
        self.plot_generator = PlotGenerator(self.data_loader)
        self.image_viewer = ImageViewer()
        self.roi_analyzer = ROIAnalyzer()
        self.gui = GradioInterface(self)

        # Load data
        if not self.data_loader.load_csv(csv_path):
            raise ValueError("Failed to load data")

    def update_plots(self, brain_measure: str, ml_model_selection: Union[str, int]) -> Tuple[Optional[Any], Optional[Any]]:
        """Update both 3D and 2D plots.

        Returns:
            (fig_3d, fig_2d) plot objects, or (None, None) on any failure.
        """
        try:
            # Get model data and name
            ml_data, ml_name = self.plot_generator.get_model_data(ml_model_selection)

            # Store current model for image viewer
            self.set_current_model(ml_model_selection, ml_name)

            fig_3d = self.plot_generator.create_3d_plot(brain_measure, ml_model_selection)
            fig_2d = self.plot_generator.create_2d_plots(brain_measure, ml_model_selection)
            return fig_3d, fig_2d
        except Exception as e:
            print(f"Error updating plots: {e}")
            return None, None

    def get_correlations(self, brain_measure: str, ml_model_selection: Union[str, int]) -> str:
        """Get correlation statistics with min/max ranges.

        Returns:
            A Markdown-formatted stats report, or an error string on failure.
        """
        try:
            data = self.data_loader.data
            ml_data, ml_name = self.plot_generator.get_model_data(ml_model_selection)

            corr_hb = data['human_judgement'].corr(data[brain_measure])
            corr_hm = data['human_judgement'].corr(ml_data)
            corr_bm = data[brain_measure].corr(ml_data)

            brain_name = brain_measure.replace('cosine_similarity_roi_values_', '').replace('pearson_correlation_roi_values_', '').title()
            measure_type = "Cosine" if "cosine" in brain_measure else "Pearson"

            # Calculate min/max correlations across all models for comparison
            all_human_ml_corrs = []
            all_brain_ml_corrs = []
            for model_col in self.data_loader.ml_models:
                human_ml_corr = data['human_judgement'].corr(data[model_col])
                brain_ml_corr = data[brain_measure].corr(data[model_col])
                all_human_ml_corrs.append(human_ml_corr)
                all_brain_ml_corrs.append(brain_ml_corr)

            min_human_ml = min(all_human_ml_corrs)
            max_human_ml = max(all_human_ml_corrs)
            min_brain_ml = min(all_brain_ml_corrs)
            max_brain_ml = max(all_brain_ml_corrs)

            stats_text = f"""## Current Analysis

### Correlation Results
| Comparison | Correlation |
|------------|-------------|
| Human ↔ Brain ({measure_type}) | **{corr_hb:.3f}** |
| Human ↔ ML Model | **{corr_hm:.3f}** |
| Brain ({measure_type}) ↔ ML Model | **{corr_bm:.3f}** |

### Dataset Correlation Ranges
| Comparison | Min | Max |
|------------|-----|-----|
| Human ↔ All ML Models | {min_human_ml:.3f} | {max_human_ml:.3f} |
| Brain ({measure_type}) ↔ All ML Models | {min_brain_ml:.3f} | {max_brain_ml:.3f} |

### Dataset Information
- **Total Image Pairs:** {len(data):,}
- **Available ML Models:** {len(self.data_loader.ml_models)}
- **Brain Measure:** {measure_type} {brain_name}
"""
            return stats_text
        except Exception as e:
            return f"**Error:** {e}"

    def get_model_rankings_for_pair(self, row_index: int) -> Dict[str, Dict[str, list]]:
        """Get top 3 best and worst models for each category for a specific image pair.

        Returns:
            {category: {'best': [(model, score), ...], 'worst': [...]}}; empty
            dict for an out-of-range index or on failure.
        """
        try:
            data = self.data_loader.data
            if row_index >= len(data):
                return {}

            row = data.iloc[row_index]
            rankings = {}

            # Get rankings for each category
            for category in ['vision', 'language', 'semantic']:
                if not self.data_loader.model_categories[category]:
                    continue

                # Get all models in this category with their scores
                category_models = [model[0] for model in self.data_loader.model_categories[category]]
                model_scores = [(model, row[model]) for model in category_models]

                # Sort by score (highest first)
                model_scores.sort(key=lambda x: x[1], reverse=True)

                # Get top 3 and bottom 3
                top_3 = model_scores[:3]
                bottom_3 = model_scores[-3:]

                rankings[category] = {
                    'best': top_3,
                    'worst': bottom_3
                }

            return rankings
        except Exception as e:
            print(f"Error getting model rankings: {e}")
            return {}

    def show_image_pair(self, row_index: int) -> Tuple[Optional[Any], Optional[Any], str, Optional[Any]]:
        """Show specific image pair with details, captions, and model rankings.

        Returns:
            (img1, img2, markdown_info_text, roi_plot); images/plot are None
            and the text carries an error message on failure.
        """
        try:
            data = self.data_loader.data
            if row_index >= len(data):
                return None, None, "Invalid row index", None

            row = data.iloc[row_index]
            img1, img2 = self.image_viewer.get_image_pair(data, row_index)

            # Calculate model averages if not already present
            # NOTE: writes avg_* columns back into the shared DataFrame so the
            # computation runs only once across calls.
            if 'avg_vision' not in data.columns:
                vision_models = [col for col in data.columns if 'BOLD5000_timm_' in col]
                language_models = [col for col in data.columns if 'bert-' in col or 'deberta-' in col or 'sup-simcse' in col]
                semantic_models = [col for col in data.columns if any(x in col for x in ["bm25", "rouge", "tf-idf", "co-occurrence"])]

                # Normalize each model to 0-1 scale before averaging
                def normalize_models(model_list):
                    if not model_list:
                        return pd.Series([0] * len(data))

                    normalized_data = []
                    for model in model_list:
                        if model in data.columns:
                            model_data = data[model]
                            # Normalize to 0-1 scale: (x - min) / (max - min)
                            normalized = (model_data - model_data.min()) / (model_data.max() - model_data.min())
                            normalized_data.append(normalized)

                    if normalized_data:
                        return pd.concat(normalized_data, axis=1).mean(axis=1)
                    else:
                        return pd.Series([0] * len(data))

                data['avg_vision'] = normalize_models(vision_models)
                data['avg_language'] = normalize_models(language_models)
                data['avg_semantic'] = normalize_models(semantic_models)

            # Get model rankings for this specific pair
            rankings = self.get_model_rankings_for_pair(row_index)

            # Get captions
            caption1 = row.get('image_1_description', 'No caption available')
            caption2 = row.get('image_2_description', 'No caption available')

            # Get current ML model selection if available
            current_ml_model = getattr(self, '_current_ml_model', None)
            current_ml_name = getattr(self, '_current_ml_name', 'No model selected')
            current_ml_score = 'N/A'

            if current_ml_model is not None:
                try:
                    if isinstance(current_ml_model, int) and current_ml_model < len(self.data_loader.ml_models):
                        ml_column = self.data_loader.ml_models[current_ml_model]
                        current_ml_score = f"{row[ml_column]:.3f}"
                    elif isinstance(current_ml_model, str) and current_ml_model.startswith('avg_'):
                        # Handle average models
                        current_ml_score = f"{row[current_ml_model]:.3f}"
                except Exception:
                    pass

            info_text = f"""## Image Pair #{row_index}

**Images:** `{row['image_1']}` vs `{row['image_2']}`

### Image Captions
**Image 1:** {caption1}

**Image 2:** {caption2}

### Similarity Scores

<div style="display: flex; gap: 20px;">

<div style="flex: 1;">

**Human & Brain Measures**
| Measure | Score |
|---------|-------|
| Human Rating | {row['human_judgement']:.3f}/6 |
| Brain (Cosine Common) | {row.get('cosine_similarity_roi_values_common', 0):.3f} |
| Brain (Cosine Early) | {row.get('cosine_similarity_roi_values_early', 0):.3f} |
| Brain (Cosine Late) | {row.get('cosine_similarity_roi_values_late', 0):.3f} |

</div>

<div style="flex: 1;">

**Brain Correlations & ML Models**
| Measure | Score |
|---------|-------|
| Brain (Pearson Common) | {row.get('pearson_correlation_roi_values_common', 0):.3f} |
| Brain (Pearson Early) | {row.get('pearson_correlation_roi_values_early', 0):.3f} |
| Brain (Pearson Late) | {row.get('pearson_correlation_roi_values_late', 0):.3f} |
| **Current ML Model** | **{current_ml_score}** |

</div>

</div>

### Model Category Averages
<div style="display: flex; gap: 20px;">

<div style="flex: 1;">

**Average Similarities**
| Model Category | Score |
|----------------|-------|
| Vision Models | {row.get('avg_vision', 0):.3f} |
| Language Models | {row.get('avg_language', 0):.3f} |
| Semantic Models | {row.get('avg_semantic', 0):.3f} |

</div>

<div style="flex: 1;">

**Model Information**
- **Current Selection:** {current_ml_name}
- **Total Vision Models:** {len([col for col in data.columns if 'BOLD5000_timm_' in col])}
- **Total Language Models:** {len([col for col in data.columns if 'bert-' in col or 'deberta-' in col or 'sup-simcse' in col])}
- **Total Semantic Models:** {len([col for col in data.columns if any(x in col for x in ["bm25", "rouge", "tf-idf", "co-occurrence"])])}

</div>

</div>

"""

            # Add model rankings for each category in a more compact format
            for category in ['vision', 'language', 'semantic']:
                if category in rankings:
                    category_name = category.title()
                    info_text += f"### {category_name} Models\n"
                    info_text += "<div style='display: flex; gap: 20px;'>\n"

                    # Best models column
                    info_text += "<div style='flex: 1;'>\n**Top 3 Best:**\n"
                    for i, (model, score) in enumerate(rankings[category]['best'], 1):
                        clean_name = model.replace('BOLD5000_timm_', '').replace('_sim_partial', '') if 'BOLD5000_timm_' in model else model
                        info_text += f"{i}. {clean_name}: {score:.3f}\n"
                    info_text += "</div>\n"

                    # Worst models column
                    info_text += "<div style='flex: 1;'>\n**Top 3 Worst:**\n"
                    for i, (model, score) in enumerate(rankings[category]['worst'], 1):
                        clean_name = model.replace('BOLD5000_timm_', '').replace('_sim_partial', '') if 'BOLD5000_timm_' in model else model
                        info_text += f"{i}. {clean_name}: {score:.3f}\n"
                    info_text += "</div>\n"

                    info_text += "</div>\n\n"

            # Create ROI comparison plot
            roi_plot = self.roi_analyzer.create_roi_comparison_plot(data, row_index)

            return img1, img2, info_text, roi_plot

        except Exception as e:
            return None, None, f"**Error:** {e}", None

    def set_current_model(self, ml_model_selection: Union[str, int], ml_name: str) -> None:
        """Store the current ML model selection for display in image viewer"""
        self._current_ml_model = ml_model_selection
        self._current_ml_name = ml_name

    def launch(self, **kwargs) -> None:
        """Launch the Gradio interface"""
        interface = self.gui.create_interface()
        interface.launch(**kwargs)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def main():
    """Entry point: build the SimilarityApp and launch its Gradio interface.

    Any startup failure (missing CSV, broken component) is reported on
    stdout instead of propagating.
    """
    # Server configuration for the Gradio launch.
    launch_options = {
        "server_name": "localhost",
        "server_port": 7860,
        "share": True,
        "debug": True,
    }
    try:
        # Create and launch the app
        application = SimilarityApp('overall_database.csv')
        application.launch(**launch_options)
    except Exception as e:
        print(f"Error starting application: {e}")


if __name__ == "__main__":
    main()
|
brain/__pycache__/roi_analyzer.cpython-311.pyc
ADDED
|
Binary file (5.84 kB). View file
|
|
|
brain/__pycache__/roi_analyzer.cpython-39.pyc
ADDED
|
Binary file (3.25 kB). View file
|
|
|
brain/roi_analyzer.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# brain/roi_analyzer.py
|
| 2 |
+
"""Brain ROI analysis and visualization"""
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import numpy as np
|
| 6 |
+
import plotly.graph_objects as go
|
| 7 |
+
from plotly.subplots import make_subplots
|
| 8 |
+
from typing import Optional, Tuple
|
| 9 |
+
import sys
|
| 10 |
+
import os
|
| 11 |
+
|
| 12 |
+
# Add parent directory to path for config imports
|
| 13 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 14 |
+
from config.roi_config import ROIConfig
|
| 15 |
+
|
| 16 |
+
class ROIAnalyzer:
    """Handles ROI data analysis and visualization.

    Parses serialized ROI activation vectors, computes similarity measures
    between them, and renders side-by-side comparison bar plots.
    """

    @staticmethod
    def parse_roi_values(roi_string) -> Optional[np.ndarray]:
        """Parse ROI values from string format.

        Accepts a '[...]'-wrapped, space- or comma-separated number list.

        Returns:
            A float ndarray, or None for missing/empty/unparseable input.
        """
        try:
            if pd.isna(roi_string) or roi_string == '':
                return None
            # Remove brackets and split by spaces or commas
            roi_string = roi_string.strip('[]')
            values = [float(x) for x in roi_string.replace(',', ' ').split() if x.strip()]
            return np.array(values)
        except Exception as e:
            print(f"Error parsing ROI values: {e}")
            return None

    @staticmethod
    def calculate_roi_similarities(roi1: np.ndarray, roi2: np.ndarray) -> Tuple[float, float]:
        """Calculate cosine and Pearson similarities between ROI vectors.

        Degenerate inputs are guarded: a zero-norm vector yields cosine 0.0
        instead of a division-by-zero NaN, and a constant vector yields
        Pearson 0.0 instead of NaN (np.corrcoef is undefined there).

        Returns:
            (cosine_similarity, pearson_similarity) as plain floats.
        """
        norm_product = np.linalg.norm(roi1) * np.linalg.norm(roi2)
        # Guard: cosine is undefined when either vector has zero norm.
        cosine_sim = float(np.dot(roi1, roi2) / norm_product) if norm_product > 0 else 0.0
        # Guard: Pearson is undefined for constant (zero-variance) vectors.
        if np.std(roi1) == 0 or np.std(roi2) == 0:
            pearson_sim = 0.0
        else:
            pearson_sim = float(np.corrcoef(roi1, roi2)[0, 1])
        return cosine_sim, pearson_sim

    # Return annotation is quoted so the class can be defined without plotly
    # being importable (the annotation is otherwise evaluated at def time).
    def create_roi_comparison_plot(self, data: pd.DataFrame, row_index: int) -> "Optional[go.Figure]":
        """Create side-by-side bar plots comparing ROI values.

        Args:
            data: DataFrame holding 'image_*_roi_values' string columns.
            row_index: Row (image pair) to visualize.

        Returns:
            A plotly Figure, or None when the index is out of range or ROI
            values are missing, unparseable, or empty.
        """
        try:
            if row_index >= len(data):
                return None

            row = data.iloc[row_index]

            # Parse ROI values for both images
            roi1 = self.parse_roi_values(row.get('image_1_roi_values', ''))
            roi2 = self.parse_roi_values(row.get('image_2_roi_values', ''))

            if roi1 is None or roi2 is None:
                return None

            # Ensure both arrays have the same length
            min_len = min(len(roi1), len(roi2))
            if min_len == 0:
                # Nothing to plot (and similarities would be undefined).
                return None
            roi1 = roi1[:min_len]
            roi2 = roi2[:min_len]

            # Get proper ROI labels
            roi_labels = ROIConfig.COMMON_ROIS[:min_len]

            # Create subplot
            fig = make_subplots(
                rows=1, cols=2,
                subplot_titles=[f'Image 1: {row["image_1"]}', f'Image 2: {row["image_2"]}']
            )

            # Add bar plots
            fig.add_trace(
                go.Bar(
                    x=roi_labels,
                    y=roi1,
                    name='Image 1',
                    marker_color='lightblue',
                    showlegend=False
                ),
                row=1, col=1
            )

            fig.add_trace(
                go.Bar(
                    x=roi_labels,
                    y=roi2,
                    name='Image 2',
                    marker_color='lightcoral',
                    showlegend=False
                ),
                row=1, col=2
            )

            # Calculate similarities
            cosine_sim, pearson_sim = self.calculate_roi_similarities(roi1, roi2)

            # Update layout
            fig.update_layout(
                title=f'ROI Comparison - Pair #{row_index}<br>Cosine: {cosine_sim:.3f}, Pearson: {pearson_sim:.3f}',
                height=500,
                width=1000,
                showlegend=False
            )

            # Update x-axis labels
            fig.update_xaxes(tickangle=45)
            fig.update_yaxes(title_text="Activation Level")

            return fig

        except Exception as e:
            print(f"Error creating ROI plot: {e}")
            return None
|
config/__pycache__/roi_config.cpython-311.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
config/__pycache__/roi_config.cpython-39.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
config/roi_config.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==================== config/roi_config.py ====================
|
| 2 |
+
"""ROI configuration and constants"""
|
| 3 |
+
|
| 4 |
+
class ROIConfig:
    """Configuration for brain regions of interest (ROIs)."""

    # Late/high-level regions (semantic and categorical processing)
    LATE_ROIS = [
        'OFA', 'FFA1', 'FFA2', 'EBA', 'FBA2', 'OPA', 'PPA', 'RSC',
        'OWFA', 'VWFA1', 'VWFA2', 'mfs words'
    ]

    # Early/low-level regions (visual processing)
    EARLY_ROIS = ['V1v', 'V1d', 'V2v', 'V2d', 'V3v', 'V3d', 'hV4']

    # All ROIs in order: the late/high-level set followed by the early set.
    COMMON_ROIS = LATE_ROIS + EARLY_ROIS

    @classmethod
    def get_roi_labels(cls, roi_type='common'):
        """Return the ROI label list for *roi_type*.

        Unknown types fall back to the full COMMON_ROIS list.
        """
        label_sets = {
            'common': cls.COMMON_ROIS,
            'early': cls.EARLY_ROIS,
            'late': cls.LATE_ROIS,
        }
        return label_sets.get(roi_type, cls.COMMON_ROIS)

    @classmethod
    def get_roi_type_from_measure(cls, brain_measure):
        """Extract the ROI type embedded in a brain-measure column name.

        Checks 'common', then 'early', then 'late' as substrings; defaults
        to 'common' when none matches.
        """
        for roi_type in ('common', 'early', 'late'):
            if roi_type in brain_measure:
                return roi_type
        return 'common'
|
data/__pycache__/data_loader.cpython-311.pyc
ADDED
|
Binary file (7.75 kB). View file
|
|
|
data/__pycache__/data_loader.cpython-39.pyc
ADDED
|
Binary file (4.7 kB). View file
|
|
|
data/data_loader.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==================== data/data_loader.py ====================
|
| 2 |
+
"""Data loading and preprocessing functionality"""
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from typing import List, Dict, Tuple, Optional
|
| 6 |
+
|
| 7 |
+
class DataLoader:
|
| 8 |
+
"""Handles loading and parsing of similarity data"""
|
| 9 |
+
|
| 10 |
+
def __init__(self):
    """Initialize an empty loader; call load_csv() to populate it."""
    # Full similarity DataFrame, one row per image pair (set by load_csv).
    self.data: Optional[pd.DataFrame] = None
    # Column names of all recognized ML-model similarity scores.
    self.ml_models: List[str] = []
    # Column names of the available brain-response measures.
    self.brain_measures: List[str] = []
    # Per-category lists of (model_column, index-into-ml_models) pairs.
    self.model_categories: Dict[str, List[Tuple[str, int]]] = {
        'vision': [], 'language': [], 'semantic': [], 'other': []
    }
|
| 17 |
+
|
| 18 |
+
def get_model_type(self, model_name: str) -> str:
    """Categorize a model column name.

    Returns one of 'semantic', 'language', 'vision' or 'other', checking
    the marker sets in that order of precedence.
    """
    semantic_markers = ("bm25", "rouge", "tf-idf", "co-occurrence")
    language_markers = ("bert", "deberta", "simcse")

    if any(marker in model_name for marker in semantic_markers):
        return "semantic"
    if any(marker in model_name for marker in language_markers):
        return "language"
    if "timm_" in model_name:
        return "vision"
    return "other"
|
| 28 |
+
|
| 29 |
+
def load_csv(self, csv_path: str) -> bool:
    """Load similarity data from a CSV file.

    Reads the file, then derives the model list, model categories and
    brain measures, and prints a summary.

    Returns:
        True on success; False (with a printed error) on any failure.
    """
    try:
        self.data = pd.read_csv(csv_path)
        # Derive all metadata from the freshly loaded frame.
        for step in (self._extract_ml_models,
                     self._categorize_models,
                     self._extract_brain_measures,
                     self._print_summary):
            step()
        return True
    except Exception as e:
        print(f"Error loading data: {e}")
        return False
|
| 41 |
+
|
| 42 |
+
def _extract_ml_models(self):
    """Collect the columns of self.data that hold ML-model similarity scores.

    A column qualifies if it contains one of the known model-name markers
    or matches one of the fixed semantic-baseline column names.
    """
    marker_substrings = ('BOLD5000_timm_', 'bert-', 'deberta-', 'sup-simcse')
    exact_names = {
        'bm25S_captions', 'co-occurrence-rep_tags', 'co-occurrence_captions',
        'bm25S_tags', 'co-occurrence_tags', 'rouge_captions',
        'tf-idf-cosine_captions', 'rouge_tags', 'tf-idf-cosine_tags',
    }
    self.ml_models = [
        col for col in self.data.columns
        if any(marker in col for marker in marker_substrings) or col in exact_names
    ]
|
| 52 |
+
|
| 53 |
+
def _categorize_models(self):
    """Group self.ml_models by category, keeping each model's index.

    Rebuilds self.model_categories as {category: [(model, index), ...]}.
    """
    buckets = {'vision': [], 'language': [], 'semantic': [], 'other': []}
    for index, model in enumerate(self.ml_models):
        buckets[self.get_model_type(model)].append((model, index))
    self.model_categories = buckets
|
| 59 |
+
|
| 60 |
+
def _extract_brain_measures(self):
|
| 61 |
+
"""Extract brain response columns"""
|
| 62 |
+
self.brain_measures = []
|
| 63 |
+
for measure_type in ['cosine_similarity', 'pearson_correlation']:
|
| 64 |
+
for roi_type in ['common', 'early', 'late']:
|
| 65 |
+
col_name = f'{measure_type}_roi_values_{roi_type}'
|
| 66 |
+
if col_name in self.data.columns:
|
| 67 |
+
self.brain_measures.append(col_name)
|
| 68 |
+
|
| 69 |
+
def _print_summary(self):
    """Print a short overview of what was loaded (pairs, models, measures)."""
    summary = [
        f"Loaded {len(self.data)} image pairs",
        f"Found {len(self.ml_models)} ML models total:",
    ]
    # One indented line per category with its model count.
    summary.extend(
        f"  {category.title()}: {len(models)}"
        for category, models in self.model_categories.items()
    )
    summary.append(f"Brain measures: {self.brain_measures}")
    for line in summary:
        print(line)
|
| 76 |
+
|
| 77 |
+
def get_ml_model_options(self) -> List[Tuple[str, any]]:
    """Build dropdown options: category averages, a separator, then models.

    Averages use string values ('avg_<category>'); individual models use
    their integer index into self.ml_models. The separator row carries the
    sentinel value 'separator' and must be rejected by event handlers.
    """
    averages = [
        (f"AVERAGE - {category.title()} Models", f"avg_{category}")
        for category in ('vision', 'language', 'semantic')
        if self.model_categories[category]
    ]

    # Separator is shown whenever any category (including 'other') is non-empty.
    separator = (
        [("────────────────────────────", "separator")]
        if any(self.model_categories.values())
        else []
    )

    individual = [(name, index) for index, name in enumerate(self.ml_models)]

    return averages + separator + individual
|
| 95 |
+
|
| 96 |
+
def get_brain_measure_options(self) -> List[Tuple[str, str]]:
    """Map raw brain-measure column names to (label, column) dropdown pairs.

    The lookup order mirrors the original precedence: cosine before
    pearson, and common/early/late within each metric. Measures matching
    no combination are skipped.
    """
    metrics = [
        ('cosine_similarity', 'Cosine'),
        ('pearson_correlation', 'Pearson'),
    ]
    rois = [('common', 'Common'), ('early', 'Early'), ('late', 'Late')]

    options = []
    for measure in self.brain_measures:
        label = next(
            (
                f"{metric_label} - {roi_label}"
                for metric_key, metric_label in metrics
                for roi_key, roi_label in rois
                if metric_key in measure and roi_key in measure
            ),
            None,
        )
        if label is not None:
            options.append((label, measure))
    return options
|
gui/__pycache__/gradio_interface.cpython-311.pyc
ADDED
|
Binary file (11.8 kB). View file
|
|
|
gui/__pycache__/gradio_interface.cpython-39.pyc
ADDED
|
Binary file (5.97 kB). View file
|
|
|
gui/gradio_interface.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==================== gui/gradio_interface.py ====================
|
| 2 |
+
"""Gradio interface components and layout"""
|
| 3 |
+
|
| 4 |
+
import gradio as gr
|
| 5 |
+
from typing import TYPE_CHECKING, Tuple, Optional, Any
|
| 6 |
+
|
| 7 |
+
if TYPE_CHECKING:
|
| 8 |
+
from main import SimilarityApp
|
| 9 |
+
|
| 10 |
+
class GradioInterface:
    """Handles the Gradio interface creation and event management.

    Builds the full page layout (controls, 3D/2D plots, image pair viewer,
    ROI plot) and wires the Gradio events to the owning app's methods.
    The app is expected to expose `data_loader`, `update_plots`,
    `get_correlations` and `show_image_pair` — see _connect_events.
    """

    def __init__(self, app: 'SimilarityApp'):
        # Keep a reference to the application facade; all data access and
        # computation is delegated to it.
        self.app = app

    def create_interface(self) -> gr.Blocks:
        """Create the main Gradio interface and return the Blocks container."""
        # Dropdown choices are derived from the already-loaded CSV data.
        brain_options = self.app.data_loader.get_brain_measure_options()
        ml_options = self.app.data_loader.get_ml_model_options()

        with gr.Blocks(title="Enhanced Image Similarity Analysis", theme=gr.themes.Soft()) as interface:
            # Header
            self._create_header()

            # Main analysis section
            with gr.Row():
                # Controls panel
                with gr.Column(scale=1):
                    controls_components = self._create_controls_panel(brain_options, ml_options)

                # 3D visualization
                with gr.Column(scale=2):
                    plot_3d = self._create_3d_section()

            # 2D plots section
            plot_2d = self._create_2d_section()

            # Image viewer section
            image_components = self._create_image_viewer_section()

            # ROI visualization section
            roi_plot = self._create_roi_section()

            # Connect all event handlers
            self._connect_events(controls_components, plot_3d, plot_2d, image_components, roi_plot)

        return interface

    def _create_header(self):
        """Create the header section"""
        gr.Markdown("# Enhanced Image Similarity Analysis")
        gr.Markdown("Compare human judgments, brain responses (Cosine & Pearson), and ML model similarities with captions and ROI visualization")

    def _create_controls_panel(self, brain_options, ml_options) -> dict:
        """Create the controls panel.

        Returns the created components in a dict keyed by name so that
        _connect_events can reference them.
        """
        gr.Markdown("### Controls")

        brain_dropdown = gr.Dropdown(
            choices=brain_options,
            # Default to the first option's value (the raw column name).
            value=brain_options[0][1] if brain_options else None,
            label="Brain Response Type",
            info="Choose between Cosine similarity and Pearson correlation"
        )

        ml_dropdown = gr.Dropdown(
            choices=ml_options,
            value=ml_options[0][1] if ml_options else None,
            label="ML Model",
            info="Individual models or category averages"
        )

        update_btn = gr.Button("Update Plots", variant="primary")
        stats_display = gr.Markdown("Select parameters and click 'Update Plots'")

        return {
            'brain_dropdown': brain_dropdown,
            'ml_dropdown': ml_dropdown,
            'update_btn': update_btn,
            'stats_display': stats_display
        }

    def _create_3d_section(self) -> gr.Plot:
        """Create the 3D visualization section"""
        gr.Markdown("### 3D Visualization")
        return gr.Plot(label="3D Scatter Plot")

    def _create_2d_section(self) -> gr.Plot:
        """Create the 2D plots section.

        Note: the Plot is created (and returned) inside the Row/Column
        context managers so it is parented to this section of the layout.
        """
        with gr.Row():
            with gr.Column():
                gr.Markdown("### 2D Pairwise Comparisons")
                return gr.Plot(label="2D Scatter Plots", show_label=False)

    def _create_image_viewer_section(self) -> dict:
        """Create the image viewer section (index input plus two image panes)."""
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Image Pair Viewer")

                with gr.Row():
                    row_input = gr.Number(
                        value=0,
                        label="Image Pair Index",
                        # Valid range is bounded by the number of loaded pairs.
                        info=f"Enter 0 to {len(self.app.data_loader.data)-1}",
                        precision=0
                    )
                    show_btn = gr.Button("Show Images & ROI", variant="secondary", size="sm")

                image_info = gr.Markdown("Enter an index and click 'Show Images & ROI' to see details")

            with gr.Column(scale=1):
                gr.Markdown("### Image 1")
                image1_display = gr.Image(label="Image 1", height=200)

            with gr.Column(scale=1):
                gr.Markdown("### Image 2")
                image2_display = gr.Image(label="Image 2", height=200)

        return {
            'row_input': row_input,
            'show_btn': show_btn,
            'image_info': image_info,
            'image1_display': image1_display,
            'image2_display': image2_display
        }

    def _create_roi_section(self) -> gr.Plot:
        """Create the ROI visualization section"""
        with gr.Row():
            with gr.Column():
                gr.Markdown("### ROI Brain Activation Comparison")
                return gr.Plot(label="Side-by-Side ROI Values", show_label=False)

    def _connect_events(self, controls, plot_3d, plot_2d, image_components, roi_plot):
        """Connect all event handlers"""

        def update_all(brain_measure, ml_model_selection):
            # The decorative separator row in the ML dropdown carries the
            # sentinel value "separator" and must not trigger a computation.
            if ml_model_selection == "separator":
                return None, None, "Please select a valid model or average option"

            fig_3d, fig_2d = self.app.update_plots(brain_measure, ml_model_selection)
            stats = self.app.get_correlations(brain_measure, ml_model_selection)
            return fig_3d, fig_2d, stats

        def show_images_and_roi(row_idx):
            # gr.Number may deliver None (cleared field) or a float; coerce
            # to int and fall back to pair 0.
            img1, img2, info, roi_fig = self.app.show_image_pair(int(row_idx) if row_idx is not None else 0)
            return img1, img2, info, roi_fig

        # Connect main plot updates — button click and both dropdown changes
        # all refresh the three plot/stat outputs.
        controls['update_btn'].click(
            fn=update_all,
            inputs=[controls['brain_dropdown'], controls['ml_dropdown']],
            outputs=[plot_3d, plot_2d, controls['stats_display']]
        )

        controls['brain_dropdown'].change(
            fn=update_all,
            inputs=[controls['brain_dropdown'], controls['ml_dropdown']],
            outputs=[plot_3d, plot_2d, controls['stats_display']]
        )

        controls['ml_dropdown'].change(
            fn=update_all,
            inputs=[controls['brain_dropdown'], controls['ml_dropdown']],
            outputs=[plot_3d, plot_2d, controls['stats_display']]
        )

        # Connect image viewer — explicit button plus live update on index edit.
        image_components['show_btn'].click(
            fn=show_images_and_roi,
            inputs=[image_components['row_input']],
            outputs=[
                image_components['image1_display'],
                image_components['image2_display'],
                image_components['image_info'],
                roi_plot
            ]
        )

        image_components['row_input'].change(
            fn=show_images_and_roi,
            inputs=[image_components['row_input']],
            outputs=[
                image_components['image1_display'],
                image_components['image2_display'],
                image_components['image_info'],
                roi_plot
            ]
        )
|
overall_database.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:08b27836f940efc44631b48b31964c02ae9ae0b7ef0c35ec4b33f1181e9e5480
|
| 3 |
+
size 44624446
|
requirements.txt
ADDED
|
@@ -0,0 +1,465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
absl-py==2.0.0
|
| 2 |
+
accelerate==0.33.0
|
| 3 |
+
aiobotocore @ file:///C:/b/abs_1c1a_vjay2/croot/aiobotocore_1682537737724/work
|
| 4 |
+
aiofiles @ file:///C:/b/abs_9ex6mi6b56/croot/aiofiles_1683773603390/work
|
| 5 |
+
aiohttp @ file:///C:/b/abs_b78zt6vo64/croot/aiohttp_1694181126607/work
|
| 6 |
+
aioitertools @ file:///tmp/build/80754af9/aioitertools_1607109665762/work
|
| 7 |
+
aiosignal @ file:///tmp/build/80754af9/aiosignal_1637843061372/work
|
| 8 |
+
aiosqlite @ file:///C:/b/abs_9djc_0pyi3/croot/aiosqlite_1683773915844/work
|
| 9 |
+
alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work
|
| 10 |
+
anaconda-anon-usage @ file:///C:/b/abs_f4tsjyl9va/croot/anaconda-anon-usage_1695310457827/work
|
| 11 |
+
anaconda-catalogs @ file:///C:/b/abs_8btyy0o8s8/croot/anaconda-catalogs_1685727315626/work
|
| 12 |
+
anaconda-client @ file:///C:/b/abs_80wttmgui4/croot/anaconda-client_1694625288614/work
|
| 13 |
+
anaconda-cloud-auth @ file:///C:/b/abs_5cjpnu6wjb/croot/anaconda-cloud-auth_1694462130037/work
|
| 14 |
+
anaconda-navigator @ file:///C:/b/abs_ab00e0_u7e/croot/anaconda-navigator_1695238210954/work
|
| 15 |
+
anaconda-project @ file:///C:/ci_311/anaconda-project_1676458365912/work
|
| 16 |
+
annotated-types==0.7.0
|
| 17 |
+
antlr4-python3-runtime==4.9.3
|
| 18 |
+
anyio==4.10.0
|
| 19 |
+
appdirs==1.4.4
|
| 20 |
+
argon2-cffi @ file:///opt/conda/conda-bld/argon2-cffi_1645000214183/work
|
| 21 |
+
argon2-cffi-bindings @ file:///C:/ci_311/argon2-cffi-bindings_1676424443321/work
|
| 22 |
+
arrow @ file:///C:/ci_311/arrow_1678249767083/work
|
| 23 |
+
astroid @ file:///C:/ci_311/astroid_1678740610167/work
|
| 24 |
+
astropy @ file:///C:/ci_311_rebuilds/astropy_1678996071858/work
|
| 25 |
+
asttokens @ file:///opt/conda/conda-bld/asttokens_1646925590279/work
|
| 26 |
+
astunparse==1.6.3
|
| 27 |
+
async-timeout @ file:///C:/ci_311/async-timeout_1676431518331/work
|
| 28 |
+
atomicwrites==1.4.0
|
| 29 |
+
attrs @ file:///C:/ci_311/attrs_1676422272484/work
|
| 30 |
+
audioread==3.0.1
|
| 31 |
+
Automat @ file:///tmp/build/80754af9/automat_1600298431173/work
|
| 32 |
+
autopep8 @ file:///opt/conda/conda-bld/autopep8_1650463822033/work
|
| 33 |
+
autoreject==0.4.2
|
| 34 |
+
Babel @ file:///C:/ci_311/babel_1676427169844/work
|
| 35 |
+
backcall @ file:///home/ktietz/src/ci/backcall_1611930011877/work
|
| 36 |
+
backports.functools-lru-cache @ file:///tmp/build/80754af9/backports.functools_lru_cache_1618170165463/work
|
| 37 |
+
backports.tempfile @ file:///home/linux1/recipes/ci/backports.tempfile_1610991236607/work
|
| 38 |
+
backports.weakref==1.0.post1
|
| 39 |
+
bcrypt @ file:///C:/ci_311/bcrypt_1676435170049/work
|
| 40 |
+
beautifulsoup4 @ file:///C:/b/abs_0agyz1wsr4/croot/beautifulsoup4-split_1681493048687/work
|
| 41 |
+
binaryornot @ file:///tmp/build/80754af9/binaryornot_1617751525010/work
|
| 42 |
+
black @ file:///C:/b/abs_620t6ndje8/croot/black_1680737261963/work
|
| 43 |
+
bleach @ file:///opt/conda/conda-bld/bleach_1641577558959/work
|
| 44 |
+
bokeh @ file:///C:/b/abs_e5qs_0dl2w/croot/bokeh_1690546119144/work
|
| 45 |
+
boltons @ file:///C:/ci_311/boltons_1677729932371/work
|
| 46 |
+
boto3==1.35.21
|
| 47 |
+
botocore==1.35.21
|
| 48 |
+
Bottleneck @ file:///C:/ci_311/bottleneck_1676500016583/work
|
| 49 |
+
Brotli==1.1.0
|
| 50 |
+
brotlipy==0.7.0
|
| 51 |
+
cachetools==5.3.2
|
| 52 |
+
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1700303426725/work/certifi
|
| 53 |
+
cffi @ file:///C:/ci_311/cffi_1676423759166/work
|
| 54 |
+
cftime==1.6.3
|
| 55 |
+
chardet @ file:///C:/ci_311/chardet_1676436134885/work
|
| 56 |
+
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
|
| 57 |
+
click @ file:///C:/ci_311/click_1676433091657/work
|
| 58 |
+
cloudpickle @ file:///C:/b/abs_3796yxesic/croot/cloudpickle_1683040098851/work
|
| 59 |
+
clyent==1.2.2
|
| 60 |
+
colorama @ file:///C:/ci_311/colorama_1676422310965/work
|
| 61 |
+
colorcet @ file:///C:/ci_311/colorcet_1676440389947/work
|
| 62 |
+
coloredlogs==15.0.1
|
| 63 |
+
comm @ file:///C:/ci_311/comm_1678376562840/work
|
| 64 |
+
conda @ file:///C:/b/abs_3eb7ewgq2c/croot/conda_1694545461647/work
|
| 65 |
+
conda-build @ file:///C:/b/abs_8di4gx5nj5/croot/conda-build_1692366837286/work
|
| 66 |
+
conda-content-trust @ file:///C:/b/abs_e3bcpyv7sw/croot/conda-content-trust_1693490654398/work
|
| 67 |
+
conda-libmamba-solver @ file:///C:/b/abs_016p0csqp7/croot/conda-libmamba-solver_1691418958509/work/src
|
| 68 |
+
conda-pack @ file:///tmp/build/80754af9/conda-pack_1611163042455/work
|
| 69 |
+
conda-package-handling @ file:///C:/b/abs_b9wp3lr1gn/croot/conda-package-handling_1691008700066/work
|
| 70 |
+
conda-repo-cli==1.0.75
|
| 71 |
+
conda-token @ file:///Users/paulyim/miniconda3/envs/c3i/conda-bld/conda-token_1662660369760/work
|
| 72 |
+
conda-verify==3.4.2
|
| 73 |
+
conda_index @ file:///C:/b/abs_50towt3zan/croot/conda-index_1695311135992/work
|
| 74 |
+
conda_package_streaming @ file:///C:/b/abs_6c28n38aaj/croot/conda-package-streaming_1690988019210/work
|
| 75 |
+
constantly==15.1.0
|
| 76 |
+
contourpy @ file:///C:/ci_311/contourpy_1676431756017/work
|
| 77 |
+
cookiecutter @ file:///opt/conda/conda-bld/cookiecutter_1649151442564/work
|
| 78 |
+
cryptography @ file:///C:/b/abs_f4do8t8jfs/croot/cryptography_1694444424531/work
|
| 79 |
+
cssselect==1.1.0
|
| 80 |
+
cvxopt==1.3.2
|
| 81 |
+
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
|
| 82 |
+
cytoolz @ file:///C:/ci_311/cytoolz_1676436342770/work
|
| 83 |
+
daal4py==2023.1.1
|
| 84 |
+
dask @ file:///C:/b/abs_23lvfodys3/croot/dask-core_1686782960052/work
|
| 85 |
+
datasets @ file:///C:/b/abs_a3jy4vrfuo/croot/datasets_1684484478038/work
|
| 86 |
+
datashader @ file:///C:/b/abs_8323862uxi/croot/datashader_1692372298149/work
|
| 87 |
+
datashape==0.5.4
|
| 88 |
+
debugpy @ file:///C:/b/abs_c0y1fjipt2/croot/debugpy_1690906864587/work
|
| 89 |
+
decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
|
| 90 |
+
defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work
|
| 91 |
+
dgl==1.1.2
|
| 92 |
+
diff-match-patch @ file:///Users/ktietz/demo/mc3/conda-bld/diff-match-patch_1630511840874/work
|
| 93 |
+
diffusers==0.31.0
|
| 94 |
+
dill @ file:///C:/ci_311/dill_1676433323862/work
|
| 95 |
+
distributed @ file:///C:/b/abs_7509xfv227/croot/distributed_1686866088894/work
|
| 96 |
+
distro==1.9.0
|
| 97 |
+
docstring-to-markdown @ file:///C:/ci_311/docstring-to-markdown_1677742566583/work
|
| 98 |
+
docutils @ file:///C:/ci_311/docutils_1676428078664/work
|
| 99 |
+
eeglabio==0.0.2.post4
|
| 100 |
+
entrypoints @ file:///C:/ci_311/entrypoints_1676423328987/work
|
| 101 |
+
et-xmlfile==1.1.0
|
| 102 |
+
executing @ file:///opt/conda/conda-bld/executing_1646925071911/work
|
| 103 |
+
fastapi==0.116.1
|
| 104 |
+
fastjsonschema @ file:///C:/ci_311/python-fastjsonschema_1679500568724/work
|
| 105 |
+
ffmpy==0.6.1
|
| 106 |
+
filelock @ file:///C:/ci_311/filelock_1676427284139/work
|
| 107 |
+
flake8 @ file:///C:/ci_311/flake8_1678376624746/work
|
| 108 |
+
Flask @ file:///C:/ci_311/flask_1676436667658/work
|
| 109 |
+
flatbuffers==23.5.26
|
| 110 |
+
fonttools==4.25.0
|
| 111 |
+
frozenlist @ file:///C:/ci_311/frozenlist_1676428131576/work
|
| 112 |
+
fsspec==2024.6.1
|
| 113 |
+
future @ file:///C:/ci_311_rebuilds/future_1678998246262/work
|
| 114 |
+
gast==0.5.4
|
| 115 |
+
gensim @ file:///C:/ci_311/gensim_1677743037820/work
|
| 116 |
+
glob2 @ file:///home/linux1/recipes/ci/glob2_1610991677669/work
|
| 117 |
+
google-auth==2.25.2
|
| 118 |
+
google-auth-oauthlib==1.0.0
|
| 119 |
+
google-pasta==0.2.0
|
| 120 |
+
gradio==5.45.0
|
| 121 |
+
gradio_client==1.13.0
|
| 122 |
+
greenlet @ file:///C:/ci_311/greenlet_1676436788118/work
|
| 123 |
+
groovy==0.1.2
|
| 124 |
+
grpcio==1.60.0
|
| 125 |
+
h11==0.14.0
|
| 126 |
+
h5io==0.1.9
|
| 127 |
+
h5py @ file:///C:/b/abs_17fav01gwy/croot/h5py_1691589733413/work
|
| 128 |
+
HeapDict @ file:///Users/ktietz/demo/mc3/conda-bld/heapdict_1630598515714/work
|
| 129 |
+
holoviews @ file:///C:/b/abs_fa7afixkhc/croot/holoviews_1693378101313/work
|
| 130 |
+
httpcore==1.0.7
|
| 131 |
+
httpx==0.28.1
|
| 132 |
+
huggingface-hub==0.34.4
|
| 133 |
+
humanfriendly==10.0
|
| 134 |
+
hvplot @ file:///C:/b/abs_2b13wifauw/croot/hvplot_1685998632349/work
|
| 135 |
+
hyperlink @ file:///tmp/build/80754af9/hyperlink_1610130746837/work
|
| 136 |
+
idna @ file:///C:/ci_311/idna_1676424932545/work
|
| 137 |
+
imagecodecs @ file:///C:/b/abs_e2g5zbs1q0/croot/imagecodecs_1695065012000/work
|
| 138 |
+
ImageHash==4.3.1
|
| 139 |
+
imageio @ file:///C:/ci_311/imageio_1678373794394/work
|
| 140 |
+
imagesize @ file:///C:/ci_311/imagesize_1676431905616/work
|
| 141 |
+
imbalanced-learn @ file:///C:/b/abs_275a0acaq2/croot/imbalanced-learn_1685025644593/work
|
| 142 |
+
importlib-metadata @ file:///C:/b/abs_20ndzb2j6v/croot/importlib-metadata_1678997085534/work
|
| 143 |
+
incremental @ file:///tmp/build/80754af9/incremental_1636629750599/work
|
| 144 |
+
inflection==0.5.1
|
| 145 |
+
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
|
| 146 |
+
innvestigate==2.1.2
|
| 147 |
+
intake @ file:///C:/ci_311_rebuilds/intake_1678999914269/work
|
| 148 |
+
intervaltree @ file:///Users/ktietz/demo/mc3/conda-bld/intervaltree_1630511889664/work
|
| 149 |
+
ipykernel @ file:///C:/b/abs_07rkft_vaz/croot/ipykernel_1691121700587/work
|
| 150 |
+
ipython @ file:///C:/b/abs_e5729i179y/croot/ipython_1694181400005/work
|
| 151 |
+
ipython-genutils @ file:///tmp/build/80754af9/ipython_genutils_1606773439826/work
|
| 152 |
+
ipywidgets @ file:///C:/b/abs_5awapknmz_/croot/ipywidgets_1679394824767/work
|
| 153 |
+
isort @ file:///tmp/build/80754af9/isort_1628603791788/work
|
| 154 |
+
itemadapter @ file:///tmp/build/80754af9/itemadapter_1626442940632/work
|
| 155 |
+
itemloaders @ file:///opt/conda/conda-bld/itemloaders_1646805235997/work
|
| 156 |
+
itsdangerous @ file:///tmp/build/80754af9/itsdangerous_1621432558163/work
|
| 157 |
+
jaraco.classes @ file:///tmp/build/80754af9/jaraco.classes_1620983179379/work
|
| 158 |
+
jedi @ file:///C:/ci_311/jedi_1679427407646/work
|
| 159 |
+
jellyfish @ file:///C:/b/abs_50kgvtnrbj/croot/jellyfish_1695193564091/work
|
| 160 |
+
Jinja2 @ file:///C:/ci_311/jinja2_1676424968965/work
|
| 161 |
+
jinja2-time @ file:///opt/conda/conda-bld/jinja2-time_1649251842261/work
|
| 162 |
+
jiter==0.8.2
|
| 163 |
+
jmespath @ file:///Users/ktietz/demo/mc3/conda-bld/jmespath_1630583964805/work
|
| 164 |
+
joblib @ file:///C:/b/abs_1anqjntpan/croot/joblib_1685113317150/work
|
| 165 |
+
json5 @ file:///tmp/build/80754af9/json5_1624432770122/work
|
| 166 |
+
jsonpatch @ file:///tmp/build/80754af9/jsonpatch_1615747632069/work
|
| 167 |
+
jsonpointer==2.1
|
| 168 |
+
jsonschema @ file:///C:/b/abs_d40z05b6r1/croot/jsonschema_1678983446576/work
|
| 169 |
+
jupyter @ file:///C:/ci_311/jupyter_1678249952587/work
|
| 170 |
+
jupyter-console @ file:///C:/b/abs_82xaa6i2y4/croot/jupyter_console_1680000189372/work
|
| 171 |
+
jupyter-events @ file:///C:/b/abs_4cak_28ewz/croot/jupyter_events_1684268050893/work
|
| 172 |
+
jupyter-server @ file:///C:/ci_311/jupyter_server_1678228762759/work
|
| 173 |
+
jupyter-ydoc @ file:///C:/b/abs_e7m6nh5lao/croot/jupyter_ydoc_1683747253535/work
|
| 174 |
+
jupyter_client @ file:///C:/ci_311/jupyter_client_1676424009414/work
|
| 175 |
+
jupyter_core @ file:///C:/b/abs_9d0ttho3bs/croot/jupyter_core_1679906581955/work
|
| 176 |
+
jupyter_server_fileid @ file:///C:/b/abs_f1yjnmiq_6/croot/jupyter_server_fileid_1684273602142/work
|
| 177 |
+
jupyter_server_ydoc @ file:///C:/b/abs_8ai39bligw/croot/jupyter_server_ydoc_1686767445888/work
|
| 178 |
+
jupyterlab @ file:///C:/b/abs_c1msr8zz3y/croot/jupyterlab_1686179674844/work
|
| 179 |
+
jupyterlab-pygments @ file:///tmp/build/80754af9/jupyterlab_pygments_1601490720602/work
|
| 180 |
+
jupyterlab-widgets @ file:///C:/b/abs_38ad427jkz/croot/jupyterlab_widgets_1679055289211/work
|
| 181 |
+
jupyterlab_server @ file:///C:/b/abs_e0qqsihjvl/croot/jupyterlab_server_1680792526136/work
|
| 182 |
+
kaleido @ file:///C:/b/abs_60smvjz1os/croot/python-kaleido_1689927138239/work
|
| 183 |
+
keras==2.14.0
|
| 184 |
+
keyring @ file:///C:/b/abs_dbjc7g0dh2/croot/keyring_1678999228878/work
|
| 185 |
+
kiwisolver @ file:///C:/ci_311/kiwisolver_1676431979301/work
|
| 186 |
+
kneed==0.8.5
|
| 187 |
+
lazy-object-proxy @ file:///C:/ci_311/lazy-object-proxy_1676432050939/work
|
| 188 |
+
lazy_loader @ file:///C:/b/abs_c9jlw06oq1/croot/lazy_loader_1687266162676/work
|
| 189 |
+
libarchive-c @ file:///tmp/build/80754af9/python-libarchive-c_1617780486945/work
|
| 190 |
+
libclang==16.0.6
|
| 191 |
+
libmambapy @ file:///C:/b/abs_71g8gec0dd/croot/mamba-split_1694187821755/work/libmambapy
|
| 192 |
+
librosa==0.10.1
|
| 193 |
+
linkify-it-py @ file:///C:/ci_311/linkify-it-py_1676474436187/work
|
| 194 |
+
llvmlite @ file:///C:/b/abs_a8i9keuf6p/croot/llvmlite_1683555140340/work
|
| 195 |
+
lmdb @ file:///C:/b/abs_556ronuvb2/croot/python-lmdb_1682522366268/work
|
| 196 |
+
locket @ file:///C:/ci_311/locket_1676428325082/work
|
| 197 |
+
lxml @ file:///C:/b/abs_9e7tpg2vv9/croot/lxml_1695058219431/work
|
| 198 |
+
lz4 @ file:///C:/b/abs_064u6aszy3/croot/lz4_1686057967376/work
|
| 199 |
+
Markdown @ file:///C:/ci_311/markdown_1676437912393/work
|
| 200 |
+
markdown-it-py @ file:///C:/b/abs_a5bfngz6fu/croot/markdown-it-py_1684279915556/work
|
| 201 |
+
MarkupSafe @ file:///C:/ci_311/markupsafe_1676424152318/work
|
| 202 |
+
matplotlib @ file:///C:/b/abs_085jhivdha/croot/matplotlib-suite_1693812524572/work
|
| 203 |
+
matplotlib-inline @ file:///C:/ci_311/matplotlib-inline_1676425798036/work
|
| 204 |
+
mccabe @ file:///opt/conda/conda-bld/mccabe_1644221741721/work
|
| 205 |
+
mdit-py-plugins @ file:///C:/ci_311/mdit-py-plugins_1676481827414/work
|
| 206 |
+
mdurl @ file:///C:/ci_311/mdurl_1676442676678/work
|
| 207 |
+
meegkit==0.1.3
|
| 208 |
+
menuinst @ file:///C:/ci_311/menuinst_1678730372782/work
|
| 209 |
+
mistune @ file:///C:/ci_311/mistune_1676425149302/work
|
| 210 |
+
mkl-fft @ file:///C:/b/abs_19i1y8ykas/croot/mkl_fft_1695058226480/work
|
| 211 |
+
mkl-random @ file:///C:/b/abs_edwkj1_o69/croot/mkl_random_1695059866750/work
|
| 212 |
+
mkl-service==2.4.0
|
| 213 |
+
ml-dtypes==0.2.0
|
| 214 |
+
mne==1.5.1
|
| 215 |
+
mne-connectivity==0.5.0
|
| 216 |
+
mne-icalabel==0.5.1
|
| 217 |
+
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
|
| 218 |
+
mpld3==0.5.10
|
| 219 |
+
mpmath @ file:///C:/b/abs_7833jrbiox/croot/mpmath_1690848321154/work
|
| 220 |
+
msgpack @ file:///C:/ci_311/msgpack-python_1676427482892/work
|
| 221 |
+
multidict @ file:///C:/ci_311/multidict_1676428396308/work
|
| 222 |
+
multipledispatch @ file:///C:/ci_311/multipledispatch_1676442767760/work
|
| 223 |
+
multiprocess @ file:///C:/ci_311/multiprocess_1676442808395/work
|
| 224 |
+
munkres==1.1.4
|
| 225 |
+
mypy-extensions @ file:///C:/b/abs_8f7xiidjya/croot/mypy_extensions_1695131051147/work
|
| 226 |
+
navigator-updater @ file:///C:/b/abs_895otdwmo9/croot/navigator-updater_1695210220239/work
|
| 227 |
+
nbclassic @ file:///C:/b/abs_c8_rs7b3zw/croot/nbclassic_1681756186106/work
|
| 228 |
+
nbclient @ file:///C:/ci_311/nbclient_1676425195918/work
|
| 229 |
+
nbconvert @ file:///C:/ci_311/nbconvert_1676425836196/work
|
| 230 |
+
nbformat @ file:///C:/b/abs_5a2nea1iu2/croot/nbformat_1694616866197/work
|
| 231 |
+
nest-asyncio @ file:///C:/ci_311/nest-asyncio_1676423519896/work
|
| 232 |
+
netCDF4==1.6.5
|
| 233 |
+
networkx @ file:///C:/b/abs_e6gi1go5op/croot/networkx_1690562046966/work
|
| 234 |
+
nibabel==5.2.1
|
| 235 |
+
nltk @ file:///C:/b/abs_a638z6l1z0/croot/nltk_1688114186909/work
|
| 236 |
+
notebook @ file:///C:/b/abs_e2qn6c85jb/croot/notebook_1690985290943/work
|
| 237 |
+
notebook_shim @ file:///C:/ci_311/notebook-shim_1678144850856/work
|
| 238 |
+
numba @ file:///C:/b/abs_00f2z7znbq/croot/numba_1690878309825/work
|
| 239 |
+
numexpr @ file:///C:/b/abs_afm0oewmmt/croot/numexpr_1683221839116/work
|
| 240 |
+
numpy @ file:///C:/Users/dev-admin/mkl/numpy_and_numpy_base_1682982345978/work
|
| 241 |
+
numpydoc @ file:///C:/ci_311/numpydoc_1676453412027/work
|
| 242 |
+
oauthlib==3.2.2
|
| 243 |
+
omegaconf==2.3.0
|
| 244 |
+
onnxruntime==1.16.2
|
| 245 |
+
openai==1.60.2
|
| 246 |
+
opencv-python==4.8.1.78
|
| 247 |
+
openpyxl==3.0.10
|
| 248 |
+
opt-einsum==3.3.0
|
| 249 |
+
orjson==3.11.3
|
| 250 |
+
packaging @ file:///C:/b/abs_28t5mcoltc/croot/packaging_1693575224052/work
|
| 251 |
+
pandas @ file:///C:/miniconda3/conda-bld/pandas_1692298018988/work
|
| 252 |
+
pandocfilters @ file:///opt/conda/conda-bld/pandocfilters_1643405455980/work
|
| 253 |
+
panel @ file:///C:/b/abs_a4rd7zrkc6/croot/panel_1695145945642/work
|
| 254 |
+
param @ file:///C:/b/abs_f5xzp6ism6/croot/param_1684915326009/work
|
| 255 |
+
paramiko @ file:///opt/conda/conda-bld/paramiko_1640109032755/work
|
| 256 |
+
parsel @ file:///C:/ci_311/parsel_1676443327188/work
|
| 257 |
+
parso @ file:///opt/conda/conda-bld/parso_1641458642106/work
|
| 258 |
+
partd @ file:///C:/b/abs_4e2m_ds81n/croot/partd_1693937921136/work
|
| 259 |
+
pathlib @ file:///Users/ktietz/demo/mc3/conda-bld/pathlib_1629713961906/work
|
| 260 |
+
pathspec @ file:///C:/ci_311/pathspec_1679427644142/work
|
| 261 |
+
patsy==0.5.3
|
| 262 |
+
pep8==1.7.1
|
| 263 |
+
pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
|
| 264 |
+
pickleshare @ file:///tmp/build/80754af9/pickleshare_1606932040724/work
|
| 265 |
+
Pillow @ file:///C:/b/abs_153xikw91n/croot/pillow_1695134603563/work
|
| 266 |
+
pkce @ file:///C:/b/abs_d0z4444tb0/croot/pkce_1690384879799/work
|
| 267 |
+
pkginfo @ file:///C:/b/abs_d18srtr68x/croot/pkginfo_1679431192239/work
|
| 268 |
+
platformdirs @ file:///C:/b/abs_b6z_yqw_ii/croot/platformdirs_1692205479426/work
|
| 269 |
+
plotly @ file:///C:/ci_311/plotly_1676443558683/work
|
| 270 |
+
pluggy @ file:///C:/ci_311/pluggy_1676422178143/work
|
| 271 |
+
ply==3.11
|
| 272 |
+
pooch==1.8.0
|
| 273 |
+
poyo @ file:///tmp/build/80754af9/poyo_1617751526755/work
|
| 274 |
+
praat-parselmouth==0.4.3
|
| 275 |
+
prometheus-client @ file:///C:/ci_311/prometheus_client_1679591942558/work
|
| 276 |
+
prompt-toolkit @ file:///C:/ci_311/prompt-toolkit_1676425940920/work
|
| 277 |
+
Protego @ file:///tmp/build/80754af9/protego_1598657180827/work
|
| 278 |
+
protobuf==4.23.4
|
| 279 |
+
psutil @ file:///C:/ci_311_rebuilds/psutil_1679005906571/work
|
| 280 |
+
ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
|
| 281 |
+
pure-eval @ file:///opt/conda/conda-bld/pure_eval_1646925070566/work
|
| 282 |
+
py-cpuinfo @ file:///Users/ktietz/demo/mc3/conda-bld/py-cpuinfo_1629480366017/work
|
| 283 |
+
pyarrow==11.0.0
|
| 284 |
+
pyasn1 @ file:///Users/ktietz/demo/mc3/conda-bld/pyasn1_1629708007385/work
|
| 285 |
+
pyasn1-modules==0.2.8
|
| 286 |
+
pycodestyle @ file:///C:/ci_311/pycodestyle_1678376707834/work
|
| 287 |
+
pycosat @ file:///C:/ci_311/pycosat_1676438455539/work
|
| 288 |
+
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
|
| 289 |
+
pyct @ file:///C:/ci_311/pyct_1676438538057/work
|
| 290 |
+
pycurl==7.45.2
|
| 291 |
+
pydantic==2.11.7
|
| 292 |
+
pydantic_core==2.33.2
|
| 293 |
+
PyDispatcher==2.0.5
|
| 294 |
+
pydocstyle @ file:///C:/ci_311/pydocstyle_1678402028085/work
|
| 295 |
+
pydub==0.25.1
|
| 296 |
+
pyEDFlib==0.1.37
|
| 297 |
+
pyerfa @ file:///C:/ci_311/pyerfa_1676503994641/work
|
| 298 |
+
pyflakes @ file:///C:/ci_311/pyflakes_1678402101687/work
|
| 299 |
+
Pygments @ file:///C:/b/abs_fay9dpq4n_/croot/pygments_1684279990574/work
|
| 300 |
+
PyJWT @ file:///C:/ci_311/pyjwt_1676438890509/work
|
| 301 |
+
pylint @ file:///C:/ci_311/pylint_1678740302984/work
|
| 302 |
+
pylint-venv @ file:///C:/ci_311/pylint-venv_1678402170638/work
|
| 303 |
+
pyls-spyder==0.4.0
|
| 304 |
+
pymatreader==0.0.32
|
| 305 |
+
PyNaCl @ file:///C:/ci_311/pynacl_1676445861112/work
|
| 306 |
+
pyodbc @ file:///C:/ci_311/pyodbc_1676489976744/work
|
| 307 |
+
pyOpenSSL @ file:///C:/b/abs_08f38zyck4/croot/pyopenssl_1690225407403/work
|
| 308 |
+
pyparsing @ file:///C:/ci_311/pyparsing_1678502182533/work
|
| 309 |
+
PyQt5==5.15.7
|
| 310 |
+
PyQt5-sip @ file:///C:/ci_311/pyqt-split_1676428895938/work/pyqt_sip
|
| 311 |
+
PyQtWebEngine==5.15.4
|
| 312 |
+
pyreadline3==3.4.1
|
| 313 |
+
pyriemann==0.5
|
| 314 |
+
pyrsistent @ file:///C:/ci_311/pyrsistent_1676422695500/work
|
| 315 |
+
PySocks @ file:///C:/ci_311/pysocks_1676425991111/work
|
| 316 |
+
pytest @ file:///C:/b/abs_48heoo_k8y/croot/pytest_1690475385915/work
|
| 317 |
+
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
|
| 318 |
+
python-dotenv @ file:///C:/ci_311/python-dotenv_1676455170580/work
|
| 319 |
+
python-json-logger @ file:///C:/b/abs_cblnsm6puj/croot/python-json-logger_1683824130469/work
|
| 320 |
+
python-lsp-black @ file:///C:/ci_311/python-lsp-black_1678721855627/work
|
| 321 |
+
python-lsp-jsonrpc==1.0.0
|
| 322 |
+
python-lsp-server @ file:///C:/b/abs_catecj7fv1/croot/python-lsp-server_1681930405912/work
|
| 323 |
+
python-multipart==0.0.20
|
| 324 |
+
python-slugify @ file:///tmp/build/80754af9/python-slugify_1620405669636/work
|
| 325 |
+
python-snappy @ file:///C:/ci_311/python-snappy_1676446060182/work
|
| 326 |
+
pytoolconfig @ file:///C:/ci_311/pytoolconfig_1678402262175/work
|
| 327 |
+
pytorch-pretrained-biggan==0.1.1
|
| 328 |
+
pytz @ file:///C:/b/abs_19q3ljkez4/croot/pytz_1695131651401/work
|
| 329 |
+
pyviz-comms @ file:///C:/b/abs_6cq38vhwa5/croot/pyviz_comms_1685030740344/work
|
| 330 |
+
PyWavelets @ file:///C:/ci_311/pywavelets_1676504105729/work
|
| 331 |
+
pywin32==305.1
|
| 332 |
+
pywin32-ctypes @ file:///C:/ci_311/pywin32-ctypes_1676427747089/work
|
| 333 |
+
pywinpty @ file:///C:/ci_311/pywinpty_1677707791185/work/target/wheels/pywinpty-2.0.10-cp311-none-win_amd64.whl
|
| 334 |
+
PyYAML @ file:///C:/ci_311/pyyaml_1676432488822/work
|
| 335 |
+
pyzmq @ file:///C:/ci_311/pyzmq_1676423601304/work
|
| 336 |
+
QDarkStyle @ file:///tmp/build/80754af9/qdarkstyle_1617386714626/work
|
| 337 |
+
qstylizer @ file:///C:/ci_311/qstylizer_1678502012152/work/dist/qstylizer-0.2.2-py2.py3-none-any.whl
|
| 338 |
+
QtAwesome @ file:///C:/ci_311/qtawesome_1678402331535/work
|
| 339 |
+
qtconsole @ file:///C:/b/abs_eb4u9jg07y/croot/qtconsole_1681402843494/work
|
| 340 |
+
QtPy @ file:///C:/ci_311/qtpy_1676432558504/work
|
| 341 |
+
queuelib==1.5.0
|
| 342 |
+
regex @ file:///C:/ci_311_rebuilds/regex_1679006156792/work
|
| 343 |
+
requests @ file:///C:/b/abs_316c2inijk/croot/requests_1690400295842/work
|
| 344 |
+
requests-file @ file:///Users/ktietz/demo/mc3/conda-bld/requests-file_1629455781986/work
|
| 345 |
+
requests-oauthlib==1.3.1
|
| 346 |
+
requests-toolbelt @ file:///C:/b/abs_2fsmts66wp/croot/requests-toolbelt_1690874051210/work
|
| 347 |
+
responses @ file:///tmp/build/80754af9/responses_1619800270522/work
|
| 348 |
+
rfc3339-validator @ file:///C:/b/abs_ddfmseb_vm/croot/rfc3339-validator_1683077054906/work
|
| 349 |
+
rfc3986-validator @ file:///C:/b/abs_6e9azihr8o/croot/rfc3986-validator_1683059049737/work
|
| 350 |
+
rich==14.1.0
|
| 351 |
+
rope @ file:///C:/ci_311/rope_1678402524346/work
|
| 352 |
+
rsa==4.9
|
| 353 |
+
Rtree @ file:///C:/ci_311/rtree_1676455758391/work
|
| 354 |
+
ruamel-yaml-conda @ file:///C:/ci_311/ruamel_yaml_1676455799258/work
|
| 355 |
+
ruamel.yaml @ file:///C:/ci_311/ruamel.yaml_1676439214109/work
|
| 356 |
+
ruff==0.13.0
|
| 357 |
+
s3fs @ file:///C:/b/abs_adfhcfx438/croot/s3fs_1682551489845/work
|
| 358 |
+
s3transfer==0.10.2
|
| 359 |
+
safehttpx==0.1.6
|
| 360 |
+
safetensors==0.5.2
|
| 361 |
+
scikit-image @ file:///C:/b/abs_2075zg1pia/croot/scikit-image_1682528361447/work
|
| 362 |
+
scikit-learn @ file:///C:/b/abs_55olq_4gzc/croot/scikit-learn_1690978955123/work
|
| 363 |
+
scikit-learn-intelex==20230426.121932
|
| 364 |
+
scipy==1.11.1
|
| 365 |
+
Scrapy @ file:///C:/ci_311/scrapy_1678502587780/work
|
| 366 |
+
seaborn @ file:///C:/ci_311/seaborn_1676446547861/work
|
| 367 |
+
semantic-version==2.10.0
|
| 368 |
+
Send2Trash @ file:///tmp/build/80754af9/send2trash_1632406701022/work
|
| 369 |
+
service-identity @ file:///Users/ktietz/demo/mc3/conda-bld/service_identity_1629460757137/work
|
| 370 |
+
shellingham==1.5.4
|
| 371 |
+
sip @ file:///C:/ci_311/sip_1676427825172/work
|
| 372 |
+
six @ file:///tmp/build/80754af9/six_1644875935023/work
|
| 373 |
+
smart-open @ file:///C:/ci_311/smart_open_1676439339434/work
|
| 374 |
+
sniffio @ file:///C:/ci_311/sniffio_1676425339093/work
|
| 375 |
+
snowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1637937080595/work
|
| 376 |
+
sortedcontainers @ file:///tmp/build/80754af9/sortedcontainers_1623949099177/work
|
| 377 |
+
soundfile==0.12.1
|
| 378 |
+
soupsieve @ file:///C:/b/abs_a989exj3q6/croot/soupsieve_1680518492466/work
|
| 379 |
+
sox==1.5.0
|
| 380 |
+
soxr==0.3.7
|
| 381 |
+
Sphinx @ file:///C:/ci_311/sphinx_1676434546244/work
|
| 382 |
+
sphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work
|
| 383 |
+
sphinxcontrib-devhelp @ file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work
|
| 384 |
+
sphinxcontrib-htmlhelp @ file:///tmp/build/80754af9/sphinxcontrib-htmlhelp_1623945626792/work
|
| 385 |
+
sphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work
|
| 386 |
+
sphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work
|
| 387 |
+
sphinxcontrib-serializinghtml @ file:///tmp/build/80754af9/sphinxcontrib-serializinghtml_1624451540180/work
|
| 388 |
+
spyder @ file:///C:/b/abs_e99kl7d8t0/croot/spyder_1681934304813/work
|
| 389 |
+
spyder-kernels @ file:///C:/b/abs_e788a8_4y9/croot/spyder-kernels_1691599588437/work
|
| 390 |
+
SQLAlchemy @ file:///C:/ci_311/sqlalchemy_1676446707912/work
|
| 391 |
+
stack-data @ file:///opt/conda/conda-bld/stack_data_1646927590127/work
|
| 392 |
+
starlette==0.47.3
|
| 393 |
+
statsmodels @ file:///C:/b/abs_7bth810rna/croot/statsmodels_1689937298619/work
|
| 394 |
+
sympy @ file:///C:/ci_311_rebuilds/sympy_1679009400182/work
|
| 395 |
+
tables @ file:///C:/b/abs_0626auep9v/croot/pytables_1691623892917/work
|
| 396 |
+
tabulate @ file:///C:/ci_311/tabulate_1676494503192/work
|
| 397 |
+
TBB==0.2
|
| 398 |
+
tblib @ file:///Users/ktietz/demo/mc3/conda-bld/tblib_1629402031467/work
|
| 399 |
+
tenacity @ file:///C:/b/abs_ddkoa9nju6/croot/tenacity_1682972298929/work
|
| 400 |
+
tensorboard==2.14.1
|
| 401 |
+
tensorboard-data-server==0.7.2
|
| 402 |
+
tensorflow==2.14.1
|
| 403 |
+
tensorflow-estimator==2.14.0
|
| 404 |
+
tensorflow-intel==2.14.1
|
| 405 |
+
tensorflow-io-gcs-filesystem==0.31.0
|
| 406 |
+
termcolor==2.4.0
|
| 407 |
+
terminado @ file:///C:/ci_311/terminado_1678228513830/work
|
| 408 |
+
text-unidecode @ file:///Users/ktietz/demo/mc3/conda-bld/text-unidecode_1629401354553/work
|
| 409 |
+
textdistance @ file:///tmp/build/80754af9/textdistance_1612461398012/work
|
| 410 |
+
tf-explain==0.3.1
|
| 411 |
+
threadpoolctl @ file:///Users/ktietz/demo/mc3/conda-bld/threadpoolctl_1629802263681/work
|
| 412 |
+
three-merge @ file:///tmp/build/80754af9/three-merge_1607553261110/work
|
| 413 |
+
tifffile @ file:///C:/b/abs_45o5chuqwt/croot/tifffile_1695107511025/work
|
| 414 |
+
tinycss2 @ file:///C:/ci_311/tinycss2_1676425376744/work
|
| 415 |
+
tldextract @ file:///opt/conda/conda-bld/tldextract_1646638314385/work
|
| 416 |
+
tokenizers==0.21.0
|
| 417 |
+
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
|
| 418 |
+
tomlkit==0.13.3
|
| 419 |
+
toolz @ file:///C:/ci_311/toolz_1676431406517/work
|
| 420 |
+
torch==2.1.0+cu118
|
| 421 |
+
torch_geometric==2.4.0
|
| 422 |
+
torchaudio==2.1.0+cu118
|
| 423 |
+
torchvision==0.16.0+cu118
|
| 424 |
+
tornado @ file:///C:/b/abs_61jhmrrua1/croot/tornado_1690848767317/work
|
| 425 |
+
tqdm @ file:///C:/b/abs_f76j9hg7pv/croot/tqdm_1679561871187/work
|
| 426 |
+
traitlets @ file:///C:/ci_311/traitlets_1676423290727/work
|
| 427 |
+
transformers==4.48.1
|
| 428 |
+
Twisted @ file:///C:/b/abs_f1pc_rieoy/croot/twisted_1683796899561/work
|
| 429 |
+
twisted-iocpsupport @ file:///C:/ci_311/twisted-iocpsupport_1676447612160/work
|
| 430 |
+
typer==0.17.4
|
| 431 |
+
typing-inspection==0.4.1
|
| 432 |
+
typing_extensions==4.12.2
|
| 433 |
+
tzdata @ file:///croot/python-tzdata_1690578112552/work
|
| 434 |
+
uc-micro-py @ file:///C:/ci_311/uc-micro-py_1676457695423/work
|
| 435 |
+
ujson @ file:///C:/ci_311/ujson_1676434714224/work
|
| 436 |
+
umap==0.1.1
|
| 437 |
+
Unidecode @ file:///tmp/build/80754af9/unidecode_1614712377438/work
|
| 438 |
+
urllib3 @ file:///C:/b/abs_889_loyqv4/croot/urllib3_1686163174463/work
|
| 439 |
+
uvicorn==0.35.0
|
| 440 |
+
w3lib @ file:///Users/ktietz/demo/mc3/conda-bld/w3lib_1629359764703/work
|
| 441 |
+
watchdog @ file:///C:/ci_311/watchdog_1676457923624/work
|
| 442 |
+
wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
|
| 443 |
+
webencodings==0.5.1
|
| 444 |
+
webrtcvad==2.0.10
|
| 445 |
+
websocket-client @ file:///C:/ci_311/websocket-client_1676426063281/work
|
| 446 |
+
websockets==15.0.1
|
| 447 |
+
Werkzeug @ file:///C:/b/abs_8578rs2ra_/croot/werkzeug_1679489759009/work
|
| 448 |
+
whatthepatch @ file:///C:/ci_311/whatthepatch_1678402578113/work
|
| 449 |
+
widgetsnbextension @ file:///C:/b/abs_882k4_4kdf/croot/widgetsnbextension_1679313880295/work
|
| 450 |
+
win-inet-pton @ file:///C:/ci_311/win_inet_pton_1676425458225/work
|
| 451 |
+
wrapt @ file:///C:/ci_311/wrapt_1676432805090/work
|
| 452 |
+
xarray @ file:///C:/b/abs_5bkjiynp4e/croot/xarray_1689041498548/work
|
| 453 |
+
XlsxWriter==3.1.9
|
| 454 |
+
xlwings @ file:///C:/ci_311_rebuilds/xlwings_1679013429160/work
|
| 455 |
+
xmltodict==0.13.0
|
| 456 |
+
xxhash @ file:///C:/ci_311/python-xxhash_1676446168786/work
|
| 457 |
+
xyzservices @ file:///C:/ci_311/xyzservices_1676434829315/work
|
| 458 |
+
y-py @ file:///C:/b/abs_b7f5go6r0j/croot/y-py_1683662173571/work
|
| 459 |
+
yapf @ file:///tmp/build/80754af9/yapf_1615749224965/work
|
| 460 |
+
yarl @ file:///C:/ci_311/yarl_1676432870023/work
|
| 461 |
+
ypy-websocket @ file:///C:/b/abs_4e65ywlnv8/croot/ypy-websocket_1684172103529/work
|
| 462 |
+
zict @ file:///C:/b/abs_fc7elavmem/croot/zict_1682698759288/work
|
| 463 |
+
zipp @ file:///C:/ci_311/zipp_1676426100491/work
|
| 464 |
+
zope.interface @ file:///C:/ci_311/zope.interface_1676439868776/work
|
| 465 |
+
zstandard==0.19.0
|
test.py
ADDED
|
@@ -0,0 +1,722 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import plotly.graph_objects as go
|
| 3 |
+
from plotly.subplots import make_subplots
|
| 4 |
+
import gradio as gr
|
| 5 |
+
import numpy as np
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import requests
|
| 8 |
+
from io import BytesIO
|
| 9 |
+
import json
|
| 10 |
+
|
| 11 |
+
class DataLoader:
    """Loads the similarity CSV and exposes ML-model / brain-measure metadata."""

    def __init__(self):
        # Raw DataFrame; populated by load_csv().
        self.data = None
        # Column names of every ML-model similarity column, in CSV order.
        self.ml_models = []
        # Brain-response column names (metric x ROI type) found in the CSV.
        self.brain_measures = []
        # category -> list of (column_name, index into ml_models).
        self.model_categories = {'vision': [], 'language': [], 'semantic': [], 'other': []}

    def get_model_type(self, model_name):
        """Return the category for a model column: 'semantic', 'language', 'vision' or 'other'."""
        semantic_markers = ("bm25", "rouge", "tf-idf", "co-occurrence")
        language_markers = ("bert", "deberta", "simcse")
        if any(marker in model_name for marker in semantic_markers):
            return "semantic"
        if any(marker in model_name for marker in language_markers):
            return "language"
        if "timm_" in model_name:
            return "vision"
        return "other"

    def load_csv(self, csv_path):
        """Read *csv_path* and populate data, ml_models, model_categories and brain_measures.

        Returns True on success, False on any failure (the error is printed).
        """
        # Exact names of the semantic-similarity columns that carry no prefix marker.
        semantic_columns = {
            'bm25S_captions', 'co-occurrence-rep_tags', 'co-occurrence_captions',
            'bm25S_tags', 'co-occurrence_tags', 'rouge_captions',
            'tf-idf-cosine_captions', 'rouge_tags', 'tf-idf-cosine_tags',
        }
        model_prefixes = ('BOLD5000_timm_', 'bert-', 'deberta-', 'sup-simcse')

        try:
            self.data = pd.read_csv(csv_path)

            # A column counts as an ML model when it carries a known marker
            # substring or is one of the explicitly listed semantic columns.
            self.ml_models = [
                col for col in self.data.columns
                if col in semantic_columns or any(p in col for p in model_prefixes)
            ]

            # Bucket each model (keeping its position) by category.
            self.model_categories = {'vision': [], 'language': [], 'semantic': [], 'other': []}
            for idx, name in enumerate(self.ml_models):
                self.model_categories[self.get_model_type(name)].append((name, idx))

            # Collect whichever brain-response columns are present:
            # cosine/pearson x common/early/late.
            self.brain_measures = [
                f'{metric}_roi_values_{roi}'
                for metric in ('cosine_similarity', 'pearson_correlation')
                for roi in ('common', 'early', 'late')
                if f'{metric}_roi_values_{roi}' in self.data.columns
            ]

            print(f"Loaded {len(self.data)} image pairs")
            print(f"Found {len(self.ml_models)} ML models total:")
            print(f" Vision: {len(self.model_categories['vision'])}")
            print(f" Language: {len(self.model_categories['language'])}")
            print(f" Semantic: {len(self.model_categories['semantic'])}")
            print(f" Other: {len(self.model_categories['other'])}")
            print(f"Brain measures: {self.brain_measures}")

            return True
        except Exception as e:
            print(f"Error loading data: {e}")
            return False

    def get_ml_model_options(self):
        """Build dropdown options: category averages, a separator, then every model."""
        options = []

        # Category averages come first, in a fixed order.
        for category, label in (('vision', "AVERAGE - Vision Models"),
                                ('language', "AVERAGE - Language Models"),
                                ('semantic', "AVERAGE - Semantic Models")):
            if self.model_categories[category]:
                options.append((label, f"avg_{category}"))

        # Visual divider between the averages and the individual models.
        if any(self.model_categories.values()):
            options.append(("────────────────────────────", "separator"))

        # Every individual model, labeled with its exact column name and
        # valued by its position in ml_models.
        options.extend((name, idx) for idx, name in enumerate(self.ml_models))
        return options

    def get_brain_measure_options(self):
        """Return (display label, column name) pairs for each known brain measure."""
        metric_labels = (('cosine_similarity', 'Cosine'), ('pearson_correlation', 'Pearson'))
        roi_labels = (('common', 'Common'), ('early', 'Early'), ('late', 'Late'))

        options = []
        for measure in self.brain_measures:
            matched = False
            for metric_key, metric_label in metric_labels:
                for roi_key, roi_label in roi_labels:
                    if metric_key in measure and roi_key in measure:
                        options.append((f"{metric_label} - {roi_label}", measure))
                        matched = True
                        break
                if matched:
                    break
        return options
|
| 113 |
+
|
| 114 |
+
class ROIVisualizer:
    """Visualizes the per-ROI brain activation vectors for an image pair."""

    @staticmethod
    def parse_roi_values(roi_string):
        """Parse a bracketed, space- or comma-separated ROI vector string.

        Accepts formats like "[0.1, 0.2]" or "[0.1 0.2]".

        Returns a 1-D numpy float array, or None when the input is
        missing/NaN/empty or malformed (the error is printed).
        """
        try:
            if pd.isna(roi_string) or roi_string == '':
                return None
            roi_string = roi_string.strip('[]')
            values = [float(x) for x in roi_string.replace(',', ' ').split() if x.strip()]
            return np.array(values)
        except Exception as e:
            print(f"Error parsing ROI values: {e}")
            return None

    @staticmethod
    def create_roi_comparison_plot(data, row_index):
        """Create side-by-side bar plots comparing the two images' ROI vectors.

        Parameters:
            data: DataFrame with 'image_1', 'image_2', 'image_1_roi_values'
                and 'image_2_roi_values' columns.
            row_index: positional row index of the pair to plot.

        Returns a plotly Figure, or None when the row is out of range or the
        ROI strings cannot be parsed.
        """
        try:
            if row_index >= len(data):
                return None

            row = data.iloc[row_index]

            # Parse ROI vectors for both images of the pair.
            roi1 = ROIVisualizer.parse_roi_values(row.get('image_1_roi_values', ''))
            roi2 = ROIVisualizer.parse_roi_values(row.get('image_2_roi_values', ''))

            if roi1 is None or roi2 is None:
                return None

            # Truncate to a common length so the vectors are comparable.
            min_len = min(len(roi1), len(roi2))
            roi1 = roi1[:min_len]
            roi2 = roi2[:min_len]

            roi_labels = [f'ROI_{i+1}' for i in range(min_len)]

            fig = make_subplots(
                rows=1, cols=2,
                subplot_titles=[f'Image 1: {row["image_1"]}', f'Image 2: {row["image_2"]}']
            )

            fig.add_trace(
                go.Bar(
                    x=roi_labels,
                    y=roi1,
                    name='Image 1',
                    marker_color='lightblue',
                    showlegend=False
                ),
                row=1, col=1
            )

            fig.add_trace(
                go.Bar(
                    x=roi_labels,
                    y=roi2,
                    name='Image 2',
                    marker_color='lightcoral',
                    showlegend=False
                ),
                row=1, col=2
            )

            # FIX: guard against zero-norm vectors, which previously produced
            # a divide-by-zero / NaN in the title.
            norm_product = np.linalg.norm(roi1) * np.linalg.norm(roi2)
            cosine_sim = float(np.dot(roi1, roi2) / norm_product) if norm_product > 0 else 0.0

            # FIX: np.corrcoef returns NaN (with a runtime warning) when a
            # vector is constant; report 0.0 in that case instead.
            with np.errstate(invalid='ignore', divide='ignore'):
                pearson_sim = np.corrcoef(roi1, roi2)[0, 1]
            if np.isnan(pearson_sim):
                pearson_sim = 0.0

            fig.update_layout(
                title=f'ROI Comparison - Pair #{row_index}<br>Cosine: {cosine_sim:.3f}, Pearson: {pearson_sim:.3f}',
                height=500,
                width=1000,
                showlegend=False
            )

            fig.update_xaxes(tickangle=45)
            fig.update_yaxes(title_text="Activation Level")

            return fig

        except Exception as e:
            print(f"Error creating ROI plot: {e}")
            return None
|
| 205 |
+
|
| 206 |
+
class PlotGenerator:
    """Builds the plotly figures (3D scatter and 2D comparison panels)."""

    # Dropdown sentinel -> (category key in model_categories, display label).
    _CATEGORY_AVERAGES = {
        'avg_vision': ('vision', 'Vision Models (Average)'),
        'avg_language': ('language', 'Language Models (Average)'),
        'avg_semantic': ('semantic', 'Semantic Models (Average)'),
    }

    def __init__(self, data_loader):
        # DataLoader instance providing .data, .ml_models and .model_categories.
        self.data_loader = data_loader

    @staticmethod
    def _brain_labels(brain_measure):
        """Return (roi_name, metric_name) display labels for a brain-measure column.

        E.g. 'cosine_similarity_roi_values_common' -> ('Common', 'Cosine').
        """
        roi_name = (brain_measure
                    .replace('cosine_similarity_roi_values_', '')
                    .replace('pearson_correlation_roi_values_', '')
                    .title())
        metric = "Cosine" if "cosine" in brain_measure else "Pearson"
        return roi_name, metric

    def get_model_data(self, ml_model_selection):
        """Resolve a dropdown selection to (similarity Series, display name).

        *ml_model_selection* is either an 'avg_<category>' sentinel string or
        an integer index into data_loader.ml_models.

        Raises ValueError for an empty category or an unknown selection.
        """
        data = self.data_loader.data

        # Category average: mean across every model column in that category.
        # (Refactored: one table-driven branch instead of three duplicates.)
        if ml_model_selection in self._CATEGORY_AVERAGES:
            category, label = self._CATEGORY_AVERAGES[ml_model_selection]
            columns = [name for name, _ in self.data_loader.model_categories[category]]
            if not columns:
                raise ValueError(f"No {category} models available")
            return data[columns].mean(axis=1), label

        # Individual model, addressed by position.
        if isinstance(ml_model_selection, int):
            ml_column = self.data_loader.ml_models[ml_model_selection]
            return data[ml_column], ml_column

        raise ValueError(f"Invalid model selection: {ml_model_selection}")

    def create_3d_plot(self, brain_measure, ml_model_selection):
        """Create the 3D scatter of human vs brain vs ML similarity.

        Returns a plotly Figure, or None when the selection is invalid.
        """
        data = self.data_loader.data

        try:
            ml_data, ml_name = self.get_model_data(ml_model_selection)
        except ValueError as e:
            print(f"Error getting model data: {e}")
            return None

        # Per-point hover text combining all three similarity values.
        hover_text = []
        for idx, row in data.iterrows():
            text = f"Pair #{idx}<br>"
            text += f"Images: {row['image_1']} vs {row['image_2']}<br>"
            text += f"Human: {row['human_judgement']:.3f}<br>"
            text += f"Brain: {row[brain_measure]:.3f}<br>"
            text += f"ML: {ml_data.iloc[idx]:.3f}"
            hover_text.append(text)

        fig = go.Figure(data=go.Scatter3d(
            x=data['human_judgement'],
            y=data[brain_measure],
            z=ml_data,
            mode='markers',
            marker=dict(
                size=6,
                color=data['human_judgement'],
                colorscale='Viridis',
                opacity=0.7,
                colorbar=dict(title="Human Rating")
            ),
            text=hover_text,
            hovertemplate='%{text}<extra></extra>',
            # Carried along so click handlers can recover the image pair.
            customdata=data[['image_1', 'image_2', 'stim_1', 'stim_2']].values
        ))

        brain_name, measure_type = self._brain_labels(brain_measure)

        fig.update_layout(
            title=f'3D Analysis: Human vs {measure_type} {brain_name} Brain vs {ml_name}',
            scene=dict(
                xaxis_title='Human Rating (0-5)',
                yaxis_title=f'Brain Similarity ({measure_type} {brain_name})',
                zaxis_title=f'ML Model: {ml_name}',
                camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
            ),
            width=800,
            height=600
        )

        return fig

    def create_2d_plots(self, brain_measure, ml_model_selection):
        """Create three 2D scatter panels: Human-Brain, Human-ML, Brain-ML.

        Returns a plotly Figure, or None when the selection is invalid.
        """
        data = self.data_loader.data

        try:
            ml_data, ml_name = self.get_model_data(ml_model_selection)
        except ValueError as e:
            print(f"Error getting model data: {e}")
            return None

        # Pairwise Pearson correlations shown in the subplot titles.
        corr_hb = data['human_judgement'].corr(data[brain_measure])
        corr_hm = data['human_judgement'].corr(ml_data)
        corr_bm = data[brain_measure].corr(ml_data)

        brain_name, measure_type = self._brain_labels(brain_measure)

        fig = make_subplots(
            rows=1, cols=3,
            subplot_titles=[
                f'Human vs Brain (r={corr_hb:.3f})',
                f'Human vs ML (r={corr_hm:.3f})',
                f'Brain vs ML (r={corr_bm:.3f})'
            ],
            horizontal_spacing=0.1
        )

        # Hover payload: [row index, image 1 name, image 2 name].
        customdata = [[idx, row['image_1'], row['image_2']] for idx, row in data.iterrows()]

        # Panel 1: Human vs Brain
        fig.add_trace(
            go.Scatter(
                x=data['human_judgement'],
                y=data[brain_measure],
                mode='markers',
                marker=dict(color='blue', opacity=0.6, size=3),
                hovertemplate='Pair #%{customdata[0]}<br>Human: %{x:.3f}<br>Brain: %{y:.3f}<br>%{customdata[1]} vs %{customdata[2]}<extra></extra>',
                customdata=customdata,
                showlegend=False
            ),
            row=1, col=1
        )

        # Panel 2: Human vs ML
        fig.add_trace(
            go.Scatter(
                x=data['human_judgement'],
                y=ml_data,
                mode='markers',
                marker=dict(color='red', opacity=0.6, size=3),
                hovertemplate='Pair #%{customdata[0]}<br>Human: %{x:.3f}<br>ML: %{y:.3f}<br>%{customdata[1]} vs %{customdata[2]}<extra></extra>',
                customdata=customdata,
                showlegend=False
            ),
            row=1, col=2
        )

        # Panel 3: Brain vs ML
        fig.add_trace(
            go.Scatter(
                x=data[brain_measure],
                y=ml_data,
                mode='markers',
                marker=dict(color='green', opacity=0.6, size=3),
                hovertemplate='Pair #%{customdata[0]}<br>Brain: %{x:.3f}<br>ML: %{y:.3f}<br>%{customdata[1]} vs %{customdata[2]}<extra></extra>',
                customdata=customdata,
                showlegend=False
            ),
            row=1, col=3
        )

        fig.update_layout(
            title=f'2D Comparisons: {measure_type} {brain_name} Brain vs {ml_name}',
            width=1300,
            height=500,
            margin=dict(l=60, r=60, t=80, b=80)
        )

        return fig
|
| 386 |
+
|
| 387 |
+
class ImageViewer:
    """Handles image loading and display."""

    @staticmethod
    def load_image_from_url(url, max_size=(200, 200)):
        """Load an image from a URL and shrink it in place to fit max_size.

        Returns a gray placeholder (sized to max_size) when the download or
        decode fails, so callers always receive a displayable image.
        """
        try:
            response = requests.get(url, timeout=10)
            # Fail fast on HTTP errors (404/500) instead of handing an error
            # page body to PIL and getting a confusing decode failure.
            response.raise_for_status()
            img = Image.open(BytesIO(response.content))
            img.thumbnail(max_size, Image.Resampling.LANCZOS)
            return img
        except Exception as e:
            print(f"Error loading image {url}: {e}")
            # Fix: the placeholder previously ignored max_size and was
            # hard-coded to 200x200.
            return Image.new('RGB', max_size, color='gray')

    @staticmethod
    def get_image_pair(data, row_index):
        """Return the (image_1, image_2) pair for a specific row.

        Gives (None, None) for an out-of-range index or when a stimulus URL
        column is missing/empty.
        """
        # Also reject negative indices; .iloc would otherwise silently wrap
        # around to the end of the DataFrame.
        if not 0 <= row_index < len(data):
            return None, None

        row = data.iloc[row_index]
        img1_url = row.get('stim_1', '')
        img2_url = row.get('stim_2', '')

        img1 = ImageViewer.load_image_from_url(img1_url) if img1_url else None
        img2 = ImageViewer.load_image_from_url(img2_url) if img2_url else None

        return img1, img2
|
| 417 |
+
|
| 418 |
+
class SimilarityApp:
    """Main application: wires the data loader, plot generator, image viewer
    and ROI visualizer together behind a Gradio interface."""

    def __init__(self, csv_path):
        """Build all helpers and load the similarity database at csv_path.

        Raises:
            ValueError: when the CSV cannot be loaded.
        """
        self.data_loader = DataLoader()
        self.plot_generator = PlotGenerator(self.data_loader)
        self.image_viewer = ImageViewer()
        self.roi_visualizer = ROIVisualizer()

        if not self.data_loader.load_csv(csv_path):
            raise ValueError("Failed to load data")

    @staticmethod
    def _clean_model_name(model):
        """Strip the BOLD5000 prefix/suffix from a model column for display.

        Extracted helper: this string surgery was previously duplicated in
        two ranking loops inside show_image_pair.
        """
        if 'BOLD5000_timm_' in model:
            return model.replace('BOLD5000_timm_', '').replace('_sim_partial', '')
        return model

    def update_plots(self, brain_measure, ml_model_selection):
        """Update both 3D and 2D plots; returns (None, None) on failure."""
        try:
            fig_3d = self.plot_generator.create_3d_plot(brain_measure, ml_model_selection)
            fig_2d = self.plot_generator.create_2d_plots(brain_measure, ml_model_selection)
            return fig_3d, fig_2d
        except Exception as e:
            print(f"Error updating plots: {e}")
            return None, None

    def get_correlations(self, brain_measure, ml_model_selection):
        """Get correlation statistics with min/max ranges as Markdown text."""
        try:
            data = self.data_loader.data
            ml_data, ml_name = self.plot_generator.get_model_data(ml_model_selection)

            corr_hb = data['human_judgement'].corr(data[brain_measure])
            corr_hm = data['human_judgement'].corr(ml_data)
            corr_bm = data[brain_measure].corr(ml_data)

            brain_name = brain_measure.replace('cosine_similarity_roi_values_', '').replace('pearson_correlation_roi_values_', '').title()
            measure_type = "Cosine" if "cosine" in brain_measure else "Pearson"

            # Min/max correlations across every model give context for how
            # the currently selected model compares to the field.
            all_human_ml_corrs = []
            all_brain_ml_corrs = []
            for model_col in self.data_loader.ml_models:
                all_human_ml_corrs.append(data['human_judgement'].corr(data[model_col]))
                all_brain_ml_corrs.append(data[brain_measure].corr(data[model_col]))

            min_human_ml = min(all_human_ml_corrs)
            max_human_ml = max(all_human_ml_corrs)
            min_brain_ml = min(all_brain_ml_corrs)
            max_brain_ml = max(all_brain_ml_corrs)

            stats_text = f"""## Current Analysis

### Correlation Results
| Comparison | Correlation |
|------------|-------------|
| Human ↔ Brain ({measure_type}) | **{corr_hb:.3f}** |
| Human ↔ ML Model | **{corr_hm:.3f}** |
| Brain ({measure_type}) ↔ ML Model | **{corr_bm:.3f}** |

### Dataset Correlation Ranges
| Comparison | Min | Max |
|------------|-----|-----|
| Human ↔ All ML Models | {min_human_ml:.3f} | {max_human_ml:.3f} |
| Brain ({measure_type}) ↔ All ML Models | {min_brain_ml:.3f} | {max_brain_ml:.3f} |

### Dataset Information
- **Total Image Pairs:** {len(data):,}
- **Available ML Models:** {len(self.data_loader.ml_models)}
- **Brain Measure:** {measure_type} {brain_name}
"""
            return stats_text
        except Exception as e:
            return f"**Error:** {e}"

    def get_model_rankings_for_pair(self, row_index):
        """Get top 3 best and worst models per category for one image pair."""
        try:
            data = self.data_loader.data
            # Fix: also reject negative indices — .iloc[-1] would silently
            # return the last row instead of failing.
            if not 0 <= row_index < len(data):
                return {}

            row = data.iloc[row_index]
            rankings = {}

            for category in ['vision', 'language', 'semantic']:
                if not self.data_loader.model_categories[category]:
                    continue

                # Pair every model in the category with its score on this row.
                category_models = [model[0] for model in self.data_loader.model_categories[category]]
                model_scores = [(model, row[model]) for model in category_models]

                # Highest similarity first.
                model_scores.sort(key=lambda x: x[1], reverse=True)

                rankings[category] = {
                    'best': model_scores[:3],
                    'worst': model_scores[-3:]
                }

            return rankings
        except Exception as e:
            print(f"Error getting model rankings: {e}")
            return {}

    def show_image_pair(self, row_index):
        """Show a specific image pair with details, captions and rankings.

        Returns (img1, img2, markdown_info, roi_figure).
        """
        try:
            data = self.data_loader.data
            # Fix: negative indices were previously accepted and wrapped.
            if not 0 <= row_index < len(data):
                return None, None, "Invalid row index", None

            row = data.iloc[row_index]
            img1, img2 = self.image_viewer.get_image_pair(data, row_index)

            # Per-category best/worst model rankings for this specific pair.
            rankings = self.get_model_rankings_for_pair(row_index)

            caption1 = row.get('image_1_description', 'No caption available')
            caption2 = row.get('image_2_description', 'No caption available')

            info_text = f"""## Image Pair #{row_index}

**Images:** `{row['image_1']}` vs `{row['image_2']}`

### Image Captions
**Image 1:** {caption1}

**Image 2:** {caption2}

### Similarity Scores
| Measure | Score |
|---------|-------|
| Human Rating | {row['human_judgement']:.3f}/6 |
| Brain (Cosine Common) | {row.get('cosine_similarity_roi_values_common', 0):.3f} |
| Brain (Cosine Early) | {row.get('cosine_similarity_roi_values_early', 0):.3f} |
| Brain (Cosine Late) | {row.get('cosine_similarity_roi_values_late', 0):.3f} |
| Brain (Pearson Common) | {row.get('pearson_correlation_roi_values_common', 0):.3f} |
| Brain (Pearson Early) | {row.get('pearson_correlation_roi_values_early', 0):.3f} |
| Brain (Pearson Late) | {row.get('pearson_correlation_roi_values_late', 0):.3f} |

"""

            # Append the per-category rankings sections.
            for category in ['vision', 'language', 'semantic']:
                if category in rankings:
                    category_name = category.title()
                    info_text += f"### Top 3 Best {category_name} Models\n"
                    for i, (model, score) in enumerate(rankings[category]['best'], 1):
                        info_text += f"{i}. {self._clean_model_name(model)}: {score:.3f}\n"

                    info_text += f"\n### Top 3 Worst {category_name} Models\n"
                    for i, (model, score) in enumerate(rankings[category]['worst'], 1):
                        info_text += f"{i}. {self._clean_model_name(model)}: {score:.3f}\n"
                    info_text += "\n"

            # Side-by-side ROI activation comparison for the two images.
            roi_plot = self.roi_visualizer.create_roi_comparison_plot(data, row_index)

            return img1, img2, info_text, roi_plot

        except Exception as e:
            return None, None, f"**Error:** {e}", None

    def create_interface(self):
        """Create and return the Gradio Blocks interface."""
        brain_options = self.data_loader.get_brain_measure_options()
        ml_options = self.data_loader.get_ml_model_options()

        with gr.Blocks(title="Enhanced Image Similarity Analysis", theme=gr.themes.Soft()) as interface:
            gr.Markdown("# Enhanced Image Similarity Analysis")
            gr.Markdown("Compare human judgments, brain responses (Cosine & Pearson), and ML model similarities with captions and ROI visualization")

            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Controls")

                    brain_dropdown = gr.Dropdown(
                        choices=brain_options,
                        value=brain_options[0][1] if brain_options else None,
                        label="Brain Response Type",
                        info="Choose between Cosine similarity and Pearson correlation"
                    )

                    ml_dropdown = gr.Dropdown(
                        choices=ml_options,
                        value=ml_options[0][1] if ml_options else None,
                        label="ML Model",
                        info="Individual models or category averages"
                    )

                    update_btn = gr.Button("Update Plots", variant="primary")
                    stats_display = gr.Markdown("Select parameters and click 'Update Plots'")

                with gr.Column(scale=2):
                    gr.Markdown("### 3D Visualization")
                    plot_3d = gr.Plot(label="3D Scatter Plot")

            with gr.Row():
                with gr.Column():
                    gr.Markdown("### 2D Pairwise Comparisons")
                    plot_2d = gr.Plot(label="2D Scatter Plots", show_label=False)

            # Image pair viewer with captions and ROI visualization.
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Image Pair Viewer")

                    with gr.Row():
                        row_input = gr.Number(
                            value=0,
                            label="Image Pair Index",
                            info=f"Enter 0 to {len(self.data_loader.data)-1}",
                            precision=0
                        )
                        show_btn = gr.Button("Show Images & ROI", variant="secondary", size="sm")

                    image_info = gr.Markdown("Enter an index and click 'Show Images & ROI' to see details")

                with gr.Column(scale=1):
                    gr.Markdown("### Image 1")
                    image1_display = gr.Image(label="Image 1", height=200)

                with gr.Column(scale=1):
                    gr.Markdown("### Image 2")
                    image2_display = gr.Image(label="Image 2", height=200)

            # ROI visualization section.
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### ROI Brain Activation Comparison")
                    roi_plot = gr.Plot(label="Side-by-Side ROI Values", show_label=False)

            # Event handlers.
            def update_all(brain_measure, ml_model_selection):
                # Filter out non-selectable options (category separators).
                if ml_model_selection == "separator":
                    return None, None, "Please select a valid model or average option"

                fig_3d, fig_2d = self.update_plots(brain_measure, ml_model_selection)
                stats = self.get_correlations(brain_measure, ml_model_selection)
                return fig_3d, fig_2d, stats

            def show_images_and_roi(row_idx):
                img1, img2, info, roi_fig = self.show_image_pair(int(row_idx) if row_idx is not None else 0)
                return img1, img2, info, roi_fig

            # Connect events: button click plus live updates on dropdown or
            # index changes.
            update_btn.click(
                fn=update_all,
                inputs=[brain_dropdown, ml_dropdown],
                outputs=[plot_3d, plot_2d, stats_display]
            )

            brain_dropdown.change(
                fn=update_all,
                inputs=[brain_dropdown, ml_dropdown],
                outputs=[plot_3d, plot_2d, stats_display]
            )

            ml_dropdown.change(
                fn=update_all,
                inputs=[brain_dropdown, ml_dropdown],
                outputs=[plot_3d, plot_2d, stats_display]
            )

            show_btn.click(
                fn=show_images_and_roi,
                inputs=[row_input],
                outputs=[image1_display, image2_display, image_info, roi_plot]
            )

            row_input.change(
                fn=show_images_and_roi,
                inputs=[row_input],
                outputs=[image1_display, image2_display, image_info, roi_plot]
            )

        return interface
|
| 704 |
+
|
| 705 |
+
def main():
    """Entry point: build the SimilarityApp and launch the Gradio UI."""
    try:
        # Points at the combined similarity database shipped with the app.
        app = SimilarityApp('overall_database.csv')
        ui = app.create_interface()

        # Local development server settings; share=False keeps it private.
        launch_options = {
            'server_name': "localhost",
            'server_port': 7860,
            'share': False,
            'debug': True,
        }
        ui.launch(**launch_options)

    except Exception as e:
        print(f"Error starting application: {e}")

if __name__ == "__main__":
    main()
|
visualization/__pycache__/image_viewer.cpython-311.pyc
ADDED
|
Binary file (2.82 kB). View file
|
|
|
visualization/__pycache__/image_viewer.cpython-39.pyc
ADDED
|
Binary file (1.68 kB). View file
|
|
|
visualization/__pycache__/plot_generator.cpython-311.pyc
ADDED
|
Binary file (9.77 kB). View file
|
|
|
visualization/__pycache__/plot_generator.cpython-39.pyc
ADDED
|
Binary file (5.59 kB). View file
|
|
|
visualization/image_viewer.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==================== visualization/image_viewer.py ====================
|
| 2 |
+
"""Image loading and display functionality"""
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from PIL import Image
|
| 6 |
+
import requests
|
| 7 |
+
from io import BytesIO
|
| 8 |
+
from typing import Optional, Tuple
|
| 9 |
+
|
| 10 |
+
class ImageViewer:
    """Handles image loading and display"""

    @staticmethod
    def load_image_from_url(url: str, max_size: Tuple[int, int] = (200, 200)) -> Optional[Image.Image]:
        """Load an image from *url* and shrink it in place to fit max_size.

        Returns a gray placeholder (sized to max_size) on any failure so the
        caller always receives a displayable image.
        """
        try:
            response = requests.get(url, timeout=10)
            # Fail fast on HTTP errors (404/500) instead of handing an error
            # page body to PIL and getting a confusing decode failure.
            response.raise_for_status()
            img = Image.open(BytesIO(response.content))
            img.thumbnail(max_size, Image.Resampling.LANCZOS)
            return img
        except Exception as e:
            print(f"Error loading image {url}: {e}")
            # Fix: the placeholder previously ignored max_size and was
            # hard-coded to 200x200.
            return Image.new('RGB', max_size, color='gray')

    @staticmethod
    def get_image_pair(data: pd.DataFrame, row_index: int) -> Tuple[Optional[Image.Image], Optional[Image.Image]]:
        """Return the (image_1, image_2) pair for *row_index*.

        Gives (None, None) for an out-of-range index or when a stimulus URL
        column is missing/empty.
        """
        # Also reject negative indices; .iloc would otherwise silently wrap
        # around to the end of the DataFrame.
        if not 0 <= row_index < len(data):
            return None, None

        row = data.iloc[row_index]
        img1_url = row.get('stim_1', '')
        img2_url = row.get('stim_2', '')

        img1 = ImageViewer.load_image_from_url(img1_url) if img1_url else None
        img2 = ImageViewer.load_image_from_url(img2_url) if img2_url else None

        return img1, img2
|
visualization/plot_generator.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# visualization/plot_generator.py
|
| 2 |
+
"""Main plotting functionality for similarity analysis"""
|
| 3 |
+
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import plotly.graph_objects as go
|
| 6 |
+
from plotly.subplots import make_subplots
|
| 7 |
+
from typing import Tuple, Optional, Union
|
| 8 |
+
import sys
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
# Add parent directory to path for imports
|
| 12 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 13 |
+
|
| 14 |
+
class PlotGenerator:
    """Handles creation of plotly visualizations"""

    # Maps a category-average selection key to (category name, display label).
    # Replaces three copy-pasted if/elif branches in get_model_data.
    _CATEGORY_AVERAGES = {
        "avg_vision": ("vision", "Vision Models (Average)"),
        "avg_language": ("language", "Language Models (Average)"),
        "avg_semantic": ("semantic", "Semantic Models (Average)"),
    }

    def __init__(self, data_loader):
        self.data_loader = data_loader

    @staticmethod
    def _brain_labels(brain_measure: str) -> Tuple[str, str]:
        """Return (brain_name, measure_type) display labels for a brain column."""
        brain_name = brain_measure.replace('cosine_similarity_roi_values_', '').replace('pearson_correlation_roi_values_', '').title()
        measure_type = "Cosine" if "cosine" in brain_measure else "Pearson"
        return brain_name, measure_type

    def get_model_data(self, ml_model_selection: Union[str, int]) -> Tuple[pd.Series, str]:
        """Get model data - either an individual model or a category average.

        Args:
            ml_model_selection: either one of the "avg_*" keys for a category
                average, or an integer index into data_loader.ml_models.

        Returns:
            (series of per-pair similarity values, display name).

        Raises:
            ValueError: unknown selection, or an empty category.
        """
        data = self.data_loader.data

        # Category averages: row-wise mean over every model column in the
        # category.
        if ml_model_selection in self._CATEGORY_AVERAGES:
            category, label = self._CATEGORY_AVERAGES[ml_model_selection]
            models = [model[0] for model in self.data_loader.model_categories[category]]
            if not models:
                raise ValueError(f"No {category} models available")
            return data[models].mean(axis=1), label

        # Individual models are addressed by their position in ml_models.
        if isinstance(ml_model_selection, int):
            ml_column = self.data_loader.ml_models[ml_model_selection]
            return data[ml_column], ml_column

        raise ValueError(f"Invalid model selection: {ml_model_selection}")

    def create_3d_plot(self, brain_measure: str, ml_model_selection: Union[str, int]) -> Optional[go.Figure]:
        """Create a 3D scatter plot of human vs brain vs ML similarity.

        Returns None when the model selection is invalid.
        """
        data = self.data_loader.data

        try:
            ml_data, ml_name = self.get_model_data(ml_model_selection)
        except ValueError as e:
            print(f"Error getting model data: {e}")
            return None

        # Build one hover string per pair. Use label-based .loc so the lookup
        # stays correct even when the DataFrame index is not 0..n-1 (the
        # original .iloc[idx] silently assumed a RangeIndex).
        hover_text = []
        for idx, row in data.iterrows():
            text = f"Pair #{idx}<br>"
            text += f"Images: {row['image_1']} vs {row['image_2']}<br>"
            text += f"Human: {row['human_judgement']:.3f}<br>"
            text += f"Brain: {row[brain_measure]:.3f}<br>"
            text += f"ML: {ml_data.loc[idx]:.3f}"
            hover_text.append(text)

        fig = go.Figure(data=go.Scatter3d(
            x=data['human_judgement'],
            y=data[brain_measure],
            z=ml_data,
            mode='markers',
            marker=dict(
                size=6,
                color=data['human_judgement'],
                colorscale='Viridis',
                opacity=0.7,
                colorbar=dict(title="Human Rating")
            ),
            text=hover_text,
            hovertemplate='%{text}<extra></extra>',
            customdata=data[['image_1', 'image_2', 'stim_1', 'stim_2']].values
        ))

        brain_name, measure_type = self._brain_labels(brain_measure)

        fig.update_layout(
            title=f'3D Analysis: Human vs {measure_type} {brain_name} Brain vs {ml_name}',
            scene=dict(
                xaxis_title='Human Rating (0-5)',
                yaxis_title=f'Brain Similarity ({measure_type} {brain_name})',
                zaxis_title=f'ML Model: {ml_name}',
                camera=dict(eye=dict(x=1.5, y=1.5, z=1.5))
            ),
            width=800,
            height=600
        )

        return fig

    def create_2d_plots(self, brain_measure: str, ml_model_selection: Union[str, int]) -> Optional[go.Figure]:
        """Create three 2D scatter plots (Human/Brain, Human/ML, Brain/ML).

        Returns None when the model selection is invalid.
        """
        data = self.data_loader.data

        try:
            ml_data, ml_name = self.get_model_data(ml_model_selection)
        except ValueError as e:
            print(f"Error getting model data: {e}")
            return None

        # Pairwise Pearson correlations shown in the subplot titles.
        corr_hb = data['human_judgement'].corr(data[brain_measure])
        corr_hm = data['human_judgement'].corr(ml_data)
        corr_bm = data[brain_measure].corr(ml_data)

        brain_name, measure_type = self._brain_labels(brain_measure)

        fig = make_subplots(
            rows=1, cols=3,
            subplot_titles=[
                f'Human vs Brain (r={corr_hb:.3f})',
                f'Human vs ML (r={corr_hm:.3f})',
                f'Brain vs ML (r={corr_bm:.3f})'
            ],
            horizontal_spacing=0.1
        )

        # Per-point hover data: row index plus the two image identifiers.
        customdata = [[idx, row['image_1'], row['image_2']] for idx, row in data.iterrows()]

        # One config per subplot; all three share the same trace shape.
        plot_configs = [
            {
                'x': data['human_judgement'],
                'y': data[brain_measure],
                'color': 'blue',
                'x_label': 'Human',
                'y_label': f'Brain ({measure_type})'
            },
            {
                'x': data['human_judgement'],
                'y': ml_data,
                'color': 'red',
                'x_label': 'Human',
                'y_label': 'ML Model'
            },
            {
                'x': data[brain_measure],
                'y': ml_data,
                'color': 'green',
                'x_label': f'Brain ({measure_type})',
                'y_label': 'ML Model'
            }
        ]

        for i, config in enumerate(plot_configs):
            fig.add_trace(
                go.Scatter(
                    x=config['x'],
                    y=config['y'],
                    mode='markers',
                    marker=dict(color=config['color'], opacity=0.6, size=3),
                    hovertemplate=f'Pair #%{{customdata[0]}}<br>{config["x_label"]}: %{{x:.3f}}<br>{config["y_label"]}: %{{y:.3f}}<br>%{{customdata[1]}} vs %{{customdata[2]}}<extra></extra>',
                    customdata=customdata,
                    showlegend=False
                ),
                row=1, col=i+1
            )

        fig.update_layout(
            title=f'2D Comparisons: {measure_type} {brain_name} Brain vs {ml_name}',
            width=1300,
            height=500,
            margin=dict(l=60, r=60, t=80, b=80)
        )

        return fig
|