Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- .github/ISSUE_TEMPLATE/bug_report.md +27 -0
- .github/ISSUE_TEMPLATE/experiment.md +22 -0
- .github/ISSUE_TEMPLATE/feature_request.md +19 -0
- .github/PULL_REQUEST_TEMPLATE.md +15 -0
- .github/workflows/ci.yml +28 -0
- .gitignore +17 -0
- CONTRIBUTING.md +110 -0
- LICENSE +407 -0
- MODEL_CARD.md +131 -0
- NOTICE +75 -0
- README.md +136 -3
- examples/quick_start.py +72 -0
- pyproject.toml +85 -0
- src/cortexlab/__init__.py +20 -0
- src/cortexlab/analysis/__init__.py +4 -0
- src/cortexlab/analysis/brain_alignment.py +223 -0
- src/cortexlab/analysis/cognitive_load.py +276 -0
- src/cortexlab/core/__init__.py +3 -0
- src/cortexlab/core/attention.py +117 -0
- src/cortexlab/core/model.py +288 -0
- src/cortexlab/core/subject.py +177 -0
- src/cortexlab/data/__init__.py +15 -0
- src/cortexlab/data/fmri.py +248 -0
- src/cortexlab/data/loader.py +318 -0
- src/cortexlab/data/studies/__init__.py +10 -0
- src/cortexlab/data/studies/algonauts2025.py +315 -0
- src/cortexlab/data/studies/lahner2024bold.py +293 -0
- src/cortexlab/data/studies/lebel2023bold.py +344 -0
- src/cortexlab/data/studies/wen2017.py +78 -0
- src/cortexlab/data/transforms.py +273 -0
- src/cortexlab/inference/__init__.py +4 -0
- src/cortexlab/inference/attribution.py +93 -0
- src/cortexlab/inference/predictor.py +392 -0
- src/cortexlab/inference/streaming.py +158 -0
- src/cortexlab/training/__init__.py +3 -0
- src/cortexlab/training/experiment.py +651 -0
- src/cortexlab/training/pl_module.py +155 -0
- src/cortexlab/viz/__init__.py +26 -0
- src/cortexlab/viz/base.py +497 -0
- src/cortexlab/viz/cortical.py +311 -0
- src/cortexlab/viz/cortical_pv.py +280 -0
- src/cortexlab/viz/subcortical.py +311 -0
- src/cortexlab/viz/utils.py +563 -0
- tests/__init__.py +0 -0
- tests/conftest.py +8 -0
- tests/test_attention.py +78 -0
- tests/test_attribution.py +136 -0
- tests/test_brain_alignment.py +91 -0
- tests/test_cognitive_load.py +100 -0
- tests/test_model.py +111 -0
.github/ISSUE_TEMPLATE/bug_report.md
ADDED
@@ -0,0 +1,27 @@
---
name: Bug Report
about: Report something that isn't working correctly
title: ''
labels: bug
assignees: ''
---

## Description
A clear description of the bug.

## Steps to Reproduce
1. ...
2. ...
3. ...

## Expected Behavior
What you expected to happen.

## Actual Behavior
What actually happened. Include the full error traceback if applicable.

## Environment
- OS:
- Python version:
- PyTorch version:
- CortexLab version:
.github/ISSUE_TEMPLATE/experiment.md
ADDED
@@ -0,0 +1,22 @@
---
name: Experiment Proposal
about: Propose a research experiment or evaluation
title: 'Experiment: '
labels: experiment
assignees: ''
---

## Objective
What question does this experiment answer?

## Method
Step-by-step description of the experimental setup.

## Data Requirements
What datasets or stimuli are needed?

## Expected Outcome
What do you expect to find, and why?

## Success Criteria
How will you know if the experiment succeeded?
.github/ISSUE_TEMPLATE/feature_request.md
ADDED
@@ -0,0 +1,19 @@
---
name: Feature Request
about: Suggest a new feature or enhancement
title: ''
labels: enhancement
assignees: ''
---

## Description
What feature would you like to see?

## Use Case
Why is this useful? What problem does it solve?

## Proposed Approach
If you have ideas on how to implement this, describe them here.

## Alternatives Considered
Any alternative solutions you've thought about.
.github/PULL_REQUEST_TEMPLATE.md
ADDED
@@ -0,0 +1,15 @@
## Summary
Brief description of what this PR does.

## Changes
- ...
- ...

## Testing
- [ ] All existing tests pass (`pytest tests/ -v`)
- [ ] Lint passes (`ruff check src/ tests/`)
- [ ] New tests added for new functionality
- [ ] Tested manually with example script

## Related Issues
Closes #
.github/workflows/ci.yml
ADDED
@@ -0,0 +1,28 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - run: pip install ruff
      - run: ruff check src/ tests/

  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - run: pip install -e ".[dev]"
      - run: pytest tests/ -v --tb=short
.gitignore
ADDED
@@ -0,0 +1,17 @@
__pycache__/
*.py[cod]
*.egg-info/
dist/
build/
.eggs/
*.egg
.pytest_cache/
.ruff_cache/
.venv/
venv/
*.ckpt
*.pt
*.pth
cache/
wandb/
.env
CONTRIBUTING.md
ADDED
@@ -0,0 +1,110 @@
# Contributing to CortexLab

Thanks for your interest in contributing to CortexLab! This guide will help you get started.

## Getting Started

### 1. Fork and clone

```bash
git clone https://github.com/YOUR_USERNAME/cortexlab.git
cd cortexlab
```

### 2. Set up the development environment

```bash
python -m venv .venv
source .venv/bin/activate  # or .venv\Scripts\activate on Windows
pip install -e ".[dev,analysis]"
```

### 3. Verify everything works

```bash
pytest tests/ -v
ruff check src/ tests/
```

## Development Workflow

1. **Create a branch** from `main` for your work:
   ```bash
   git checkout -b feature/your-feature-name
   ```

2. **Make your changes** - keep commits focused and atomic.

3. **Run tests and lint** before committing:
   ```bash
   pytest tests/ -v
   ruff check src/ tests/
   ```

4. **Open a pull request** with a clear description of what you changed and why.

## Project Structure

```
src/cortexlab/
  core/        Model architecture, attention extraction, subject adaptation
  data/        Dataset loading, transforms, HCP ROI utilities
  training/    PyTorch Lightning training pipeline
  inference/   Predictor, streaming, modality attribution
  analysis/    Brain-alignment benchmark, cognitive load scorer
  viz/         Brain surface visualization
tests/         Unit tests (pytest)
examples/      Usage examples
```

## What to Work On

- Check [issues labeled `good first issue`](../../issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for beginner-friendly tasks
- Check [issues labeled `help wanted`](../../issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) for tasks where we need help
- Look at the [experiment issues](../../issues?q=is%3Aissue+is%3Aopen+label%3Aexperiment) if you want to run evaluations

## Code Style

- **Linting**: We use [ruff](https://docs.astral.sh/ruff/) with a 100-character line limit
- **Tests**: Write pytest tests for new functionality. Use synthetic data (no real fMRI needed)
- **Docstrings**: Use NumPy-style docstrings for public functions
- **Imports**: Let ruff sort imports automatically (`ruff check --fix`)

## Writing Tests

Tests use synthetic data and mock objects so you don't need real fMRI datasets or GPU access:

```python
import torch
from neuralset.dataloader import SegmentData
import neuralset.segments as seg

batch_size, seq_len = 4, 16  # small synthetic dimensions

# Create dummy segments matching the batch size
segments = [seg.Segment(start=float(i), duration=1.0, timeline="test") for i in range(batch_size)]

# Create a synthetic batch
data = {
    "text": torch.randn(batch_size, 2, 32, seq_len),
    "subject_id": torch.zeros(batch_size, dtype=torch.long),
}
batch = SegmentData(data=data, segments=segments)
```

## Adding New Features

If you're adding a new analysis method or inference capability:

1. Add the implementation in the appropriate subpackage
2. Export it from the subpackage's `__init__.py`
3. Write tests in `tests/test_yourfeature.py`
4. Add a usage example in the README or `examples/`

## Reporting Bugs

When filing a bug report, please include:
- Python version and OS
- PyTorch version
- Steps to reproduce
- Full error traceback
- What you expected to happen

## Questions?

Open an issue with the `question` label and we'll help out.
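The testing guidance above boils down to: exercise new functionality on small synthetic arrays so the suite runs without datasets or a GPU. A minimal sketch of such a test, reusing the `BrainAlignmentBenchmark` API shown in the README; the file name, array shapes, and ROI labels are illustrative assumptions, not part of the repository:

```python
# tests/test_alignment_smoke.py -- illustrative file name, not an existing test
import numpy as np

from cortexlab.analysis import BrainAlignmentBenchmark


def test_rsa_score_is_finite_on_synthetic_data():
    """The benchmark should return a finite score for small random inputs."""
    rng = np.random.default_rng(0)
    brain_predictions = rng.standard_normal((20, 300))  # 20 stimuli x 300 vertices (made up)
    model_features = rng.standard_normal((20, 64))      # 20 stimuli x 64 feature dims (made up)
    roi_indices = {"V1": np.arange(0, 50), "A1": np.arange(50, 100)}

    bench = BrainAlignmentBenchmark(brain_predictions, roi_indices=roi_indices)
    result = bench.score_model(model_features, method="rsa")

    assert np.isfinite(result.aggregate_score)
```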
LICENSE
ADDED
@@ -0,0 +1,407 @@
Attribution-NonCommercial 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.

Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors

Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public: wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution-NonCommercial 4.0 International Public License

By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.

Section 1 -- Definitions.

a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.

b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.

c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.

d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.

e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.

f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.

g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.

h. Licensor means the individual(s) or entity(ies) granting rights under this Public License.

i. NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.

j. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.

k. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.

l. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.

Section 2 -- Scope.

a. License grant.

   1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:

      a. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and

      b. produce, reproduce, and Share Adapted Material for NonCommercial purposes only.

   2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.

   3. Term. The term of this Public License is specified in Section 6(a).

   4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.

   5. Downstream recipients.

      a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.

      b. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.

   6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).

b. Other rights.

   1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.

   2. Patent and trademark rights are not licensed under this Public License.

   3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.

Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the following conditions.

a. Attribution.

   1. If You Share the Licensed Material (including in modified form), You must:

      a. retain the following if it is supplied by the Licensor with the Licensed Material:

         i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);

         ii. a copyright notice;

         iii. a notice that refers to this Public License;

         iv. a notice that refers to the disclaimer of warranties;

         v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;

      b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and

      c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.

   2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.

   3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.

   4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.

Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:

a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;

b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and

c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.

Section 5 -- Disclaimer of Warranties and Limitation of Liability.

a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.

Section 6 -- Term and Termination.

a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.

b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:

   1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or

   2. upon express reinstatement by the Licensor.

   For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.

c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.

d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.

Section 7 -- Other Terms and Conditions.

a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.

b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.

Section 8 -- Interpretation.

a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.

b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.

c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.

d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.

Creative Commons may be contacted at creativecommons.org.
MODEL_CARD.md
ADDED
@@ -0,0 +1,131 @@
---
license: cc-by-nc-4.0
library_name: cortexlab
tags:
- neuroscience
- fmri
- brain-encoding
- multimodal
- tribe-v2
- brain-alignment
- cognitive-load
language:
- en
pipeline_tag: other
---

# CortexLab

Enhanced multimodal fMRI brain encoding toolkit built on [Meta's TRIBE v2](https://github.com/facebookresearch/tribev2).

CortexLab extends TRIBE v2 with streaming inference, interpretability tools, cross-subject adaptation, brain-alignment benchmarking, and cognitive load scoring.

## What This Repo Contains

This is a **code-only** repository. It does not contain pretrained weights. The pretrained TRIBE v2 model is hosted by Meta at [`facebook/tribev2`](https://huggingface.co/facebook/tribev2).

## Features

| Feature | Description |
|---|---|
| **Streaming Inference** | Sliding-window real-time predictions from live feature streams |
| **ROI Attention Maps** | Visualize which brain regions attend to which temporal moments |
| **Modality Attribution** | Per-vertex importance scores for text, audio, and video |
| **Cross-Subject Adaptation** | Ridge regression or nearest-neighbour adaptation for new subjects |
| **Brain-Alignment Benchmark** | Score how "brain-like" any AI model's representations are (RSA, CKA, Procrustes) |
| **Cognitive Load Scorer** | Predict cognitive demand of media from predicted brain activation patterns |

## Prerequisites

The pretrained TRIBE v2 model uses **LLaMA 3.2-3B** as its text encoder. You must:

1. Accept Meta's LLaMA license at [llama.meta.com](https://llama.meta.com/)
2. Request access on [HuggingFace](https://huggingface.co/meta-llama/Llama-3.2-3B)
3. Authenticate: `huggingface-cli login`

## Installation

```bash
git clone https://github.com/siddhant-rajhans/cortexlab.git
cd cortexlab
pip install -e ".[analysis]"
```

## Quick Start

### Inference

```python
from cortexlab.inference.predictor import TribeModel

model = TribeModel.from_pretrained("facebook/tribev2", device="auto")
events = model.get_events_dataframe(video_path="clip.mp4")
preds, segments = model.predict(events)
```

### Brain-Alignment Benchmark

```python
from cortexlab.analysis import BrainAlignmentBenchmark

bench = BrainAlignmentBenchmark(brain_predictions, roi_indices=roi_indices)
result = bench.score_model(clip_features, method="rsa")
print(f"Alignment: {result.aggregate_score:.3f}")
```

### Cognitive Load Scoring

```python
from cortexlab.analysis import CognitiveLoadScorer

scorer = CognitiveLoadScorer(roi_indices)
result = scorer.score_predictions(predictions)
print(f"Overall load: {result.overall_load:.2f}")
```

## Compute Requirements

| Component | VRAM | Notes |
|---|---|---|
| TRIBE v2 encoder | ~1 GB | Small (1.15M params) |
| LLaMA 3.2-3B (text) | ~8 GB | Features cached after first run |
| V-JEPA2 (video) | ~6 GB | Features cached after first run |
| Wav2Vec-BERT (audio) | ~3 GB | Features cached after first run |

Minimum: a GPU with 16 GB of VRAM for full inference. CPU inference works but is slow. The analysis tools (benchmark, cognitive load) run on precomputed predictions and need no GPU.

## Architecture

```
src/cortexlab/
  core/       Model architecture, attention extraction, subject adaptation
  data/       Dataset loading, transforms, HCP ROI utilities
  training/   PyTorch Lightning training pipeline
  inference/  Predictor, streaming, modality attribution
  analysis/   Brain-alignment benchmark, cognitive load scorer
  viz/        Brain surface visualization (nilearn, pyvista)
```

## License

CC BY-NC 4.0 (non-commercial use only), inherited from TRIBE v2.

This project does not redistribute pretrained weights. Users must download weights directly from [`facebook/tribev2`](https://huggingface.co/facebook/tribev2).

## Citation

If you use CortexLab in your research, please cite the original TRIBE v2 paper:

```bibtex
@article{dascoli2026tribe,
  title={A foundation model of vision, audition, and language for in-silico neuroscience},
  author={d'Ascoli, St{\'e}phane and others},
  year={2026}
}
```

## Links

- **GitHub**: [siddhant-rajhans/cortexlab](https://github.com/siddhant-rajhans/cortexlab)
- **TRIBE v2**: [facebookresearch/tribev2](https://github.com/facebookresearch/tribev2)
- **Pretrained weights**: [facebook/tribev2](https://huggingface.co/facebook/tribev2)
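The Prerequisites section above authenticates with `huggingface-cli login`; the same step can be done from Python using `huggingface_hub`'s `login()` helper. This is a generic Hugging Face snippet, not part of CortexLab's API:

```python
from huggingface_hub import login

# Prompts for a HuggingFace access token; the account must already have been
# granted access to meta-llama/Llama-3.2-3B for text feature extraction to work.
login()
```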
NOTICE
ADDED
@@ -0,0 +1,75 @@
CortexLab
Copyright 2026 CortexLab Contributors

This project is a derivative work built upon TRIBE v2 by Meta Platforms, Inc.

================================================================================

TRIBE v2
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
https://github.com/facebookresearch/tribev2

The following source files in this repository are derived from TRIBE v2:
- src/cortexlab/core/model.py (modified: added return_attn, compile_backbone)
- src/cortexlab/data/loader.py (original: tribev2/utils.py)
- src/cortexlab/data/transforms.py (original: tribev2/eventstransforms.py)
- src/cortexlab/data/fmri.py (original: tribev2/utils_fmri.py)
- src/cortexlab/data/studies/* (original: tribev2/studies/*)
- src/cortexlab/training/experiment.py (original: tribev2/main.py)
- src/cortexlab/training/pl_module.py (original: tribev2/pl_module.py)
- src/cortexlab/inference/predictor.py (original: tribev2/demo_utils.py)
- src/cortexlab/viz/* (original: tribev2/plotting/*)

The pretrained model weights are hosted by Meta at:
https://huggingface.co/facebook/tribev2

================================================================================

neuralset (v0.0.2)
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under CC BY-NC 4.0

neuraltrain (v0.0.2)
Copyright (c) Meta Platforms, Inc. and affiliates.
Licensed under CC BY-NC 4.0

================================================================================

Third-party dependencies and their licenses:

PyTorch (torch) - BSD 3-Clause
Transformers - Apache 2.0
HuggingFace Hub - Apache 2.0
x_transformers - MIT
einops - MIT
NumPy - BSD 3-Clause
pandas - BSD 3-Clause
PyYAML - MIT
MoviePy - MIT
gTTS - MIT
langdetect - Apache 2.0
spaCy - MIT
SoundFile - BSD 3-Clause
julius - MIT
pydantic - MIT
requests - Apache 2.0
tqdm - MIT / MPL 2.0
SciPy - BSD 3-Clause
nilearn - BSD 3-Clause
MNE-Python - BSD 3-Clause
PyVista - MIT
matplotlib - PSF / BSD
PyTorch Lightning - Apache 2.0
Weights & Biases - MIT

================================================================================

LLaMA 3.2-3B (used as text feature extractor by the pretrained TRIBE v2 model)
Copyright (c) Meta Platforms, Inc.
Licensed under the Meta Llama 3.2 Community License Agreement
https://llama.meta.com/llama3_2/license/

Users must independently accept Meta's LLaMA license to use the pretrained
TRIBE v2 model for text feature extraction. CortexLab does not redistribute
LLaMA weights.
README.md
CHANGED
@@ -1,3 +1,136 @@
# CortexLab

Enhanced multimodal fMRI brain encoding toolkit built on [Meta's TRIBE v2](https://github.com/facebookresearch/tribev2).

CortexLab extends TRIBE v2 with streaming inference, interpretability tools, cross-subject adaptation, brain-alignment benchmarking, and cognitive load scoring.

## Features

| Feature | Description |
|---|---|
| **Streaming Inference** | Sliding-window real-time predictions from live feature streams |
| **ROI Attention Maps** | Visualize which brain regions attend to which temporal moments |
| **Modality Attribution** | Per-vertex importance scores for text, audio, and video |
| **Cross-Subject Adaptation** | Ridge regression or nearest-neighbour adaptation for new subjects |
| **Brain-Alignment Benchmark** | Score how "brain-like" any AI model's representations are (RSA, CKA, Procrustes) |
| **Cognitive Load Scorer** | Predict cognitive demand of media from predicted brain activation patterns |

## Prerequisites

The pretrained TRIBE v2 model uses **LLaMA 3.2-3B** as its text encoder. You must accept Meta's LLaMA license before using it:

1. Visit [llama.meta.com](https://llama.meta.com/) and accept the license
2. Request access on [HuggingFace](https://huggingface.co/meta-llama/Llama-3.2-3B)
3. Authenticate: `huggingface-cli login`

## Installation

```bash
pip install -e "."

# With optional dependencies
pip install -e ".[plotting]"   # Brain visualization
pip install -e ".[training]"   # PyTorch Lightning training
pip install -e ".[analysis]"   # RSA/CKA benchmarking (scipy)
pip install -e ".[dev]"        # Testing and linting
```

## Quick Start

### Inference

```python
from cortexlab.inference.predictor import TribeModel

model = TribeModel.from_pretrained("facebook/tribev2", device="auto")
events = model.get_events_dataframe(video_path="clip.mp4")
preds, segments = model.predict(events)
```

### Brain-Alignment Benchmark

```python
from cortexlab.analysis import BrainAlignmentBenchmark

bench = BrainAlignmentBenchmark(brain_predictions, roi_indices=roi_indices)
result = bench.score_model(clip_features, method="rsa")
print(f"Alignment: {result.aggregate_score:.3f}")
print(f"V1 alignment: {result.roi_scores['V1']:.3f}")
```

### Cognitive Load Scoring

```python
from cortexlab.analysis import CognitiveLoadScorer

scorer = CognitiveLoadScorer(roi_indices)
result = scorer.score_predictions(predictions)
print(f"Overall load: {result.overall_load:.2f}")
print(f"Visual complexity: {result.visual_complexity:.2f}")
print(f"Language processing: {result.language_processing:.2f}")
```

### Streaming Inference

```python
from cortexlab.inference import StreamingPredictor

sp = StreamingPredictor(model._model, window_trs=40, step_trs=1, device="cuda")
for features in live_feature_stream():
    pred = sp.push_frame(features)
    if pred is not None:
        visualize(pred)  # (n_vertices,)
```

### Modality Attribution

```python
from cortexlab.inference import ModalityAttributor

attributor = ModalityAttributor(model._model, roi_indices=roi_indices)
scores = attributor.attribute(batch)
# scores["text"], scores["audio"], scores["video"] -> (n_vertices,)
```

### Cross-Subject Adaptation

```python
from cortexlab.core.subject import SubjectAdapter

adapter = SubjectAdapter.from_ridge(model._model, calibration_loader, regularization=1e-3)
new_subject_id = adapter.inject_into_model(model._model)
```

## Architecture

```
src/cortexlab/
  core/       Model architecture, attention extraction, subject adaptation
  data/       Dataset loading, transforms, HCP ROI utilities
  training/   PyTorch Lightning training pipeline
  inference/  Predictor, streaming, modality attribution
  analysis/   Brain-alignment benchmark, cognitive load scorer
  viz/        Brain surface visualization (nilearn, pyvista)
```

## Development

```bash
pip install -e ".[dev]"
pytest tests/ -v
ruff check src/ tests/
```

## License

CC BY-NC 4.0 (inherited from TRIBE v2). See [LICENSE](LICENSE) and [NOTICE](NOTICE).

This project is for **non-commercial use only**. The pretrained weights are hosted by Meta at [facebook/tribev2](https://huggingface.co/facebook/tribev2) and are not redistributed by this project.

## Acknowledgements

Built on [TRIBE v2](https://github.com/facebookresearch/tribev2) by Meta FAIR.

> d'Ascoli et al., "A foundation model of vision, audition, and language for in-silico neuroscience", 2026.

See [NOTICE](NOTICE) for full attribution and third-party licenses.
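The README's `BrainAlignmentBenchmark` compares model representations against predicted brain responses with RSA, CKA, or Procrustes. As a rough, self-contained illustration of what RSA- and CKA-style scores measure — a generic NumPy/SciPy sketch with made-up array shapes, not CortexLab's implementation:

```python
import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
model_features = rng.standard_normal((50, 768))    # 50 stimuli x model feature dims (made up)
brain_responses = rng.standard_normal((50, 2048))  # 50 stimuli x ROI vertices (made up)

# RSA: build a representational dissimilarity matrix (pairwise correlation
# distance between stimuli) in each space, then rank-correlate the two RDMs.
rdm_model = pdist(model_features, metric="correlation")
rdm_brain = pdist(brain_responses, metric="correlation")
rsa_score, _ = spearmanr(rdm_model, rdm_brain)

# Linear CKA on column-centered features, computed via 50x50 Gram matrices
# so the vertex dimension is never materialised as a large covariance matrix.
X = model_features - model_features.mean(axis=0)
Y = brain_responses - brain_responses.mean(axis=0)
Kx, Ky = X @ X.T, Y @ Y.T
cka_score = np.sum(Kx * Ky) / (np.linalg.norm(Kx) * np.linalg.norm(Ky))

print(f"RSA (Spearman rho): {rsa_score:.3f}")
print(f"Linear CKA: {cka_score:.3f}")
```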
examples/quick_start.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""CortexLab Quick Start Example.

Demonstrates loading a pretrained model, running inference,
and using the brain-alignment benchmark and cognitive load scorer.
"""

import numpy as np


def run_benchmark_demo():
    """Demonstrate the brain-alignment benchmark with synthetic data."""
    from cortexlab.analysis import BrainAlignmentBenchmark

    n_stimuli = 50
    model_features = np.random.randn(n_stimuli, 768)  # e.g. CLIP features
    brain_predictions = np.random.randn(n_stimuli, 20484)  # fsaverage5 vertices

    roi_indices = {
        "V1": np.arange(0, 100),
        "MT": np.arange(500, 600),
        "A1": np.arange(1000, 1100),
        "Broca": np.arange(2000, 2100),
    }

    bench = BrainAlignmentBenchmark(brain_predictions, roi_indices=roi_indices)

    for method in ["rsa", "cka", "procrustes"]:
        result = bench.score_model(model_features, method=method)
        print(f"[{method.upper()}] Aggregate: {result.aggregate_score:.4f}")
        for roi, score in sorted(result.roi_scores.items()):
            print(f"  {roi}: {score:.4f}")
        print()


def run_cognitive_load_demo():
    """Demonstrate the cognitive load scorer with synthetic predictions."""
    from cortexlab.analysis import CognitiveLoadScorer

    roi_indices = {
        "46": np.arange(0, 10),
        "FEF": np.arange(10, 20),
        "V1": np.arange(100, 120),
        "V2": np.arange(120, 140),
        "MT": np.arange(140, 160),
        "A1": np.arange(200, 220),
        "LBelt": np.arange(220, 230),
        "44": np.arange(300, 310),
        "45": np.arange(310, 320),
    }

    scorer = CognitiveLoadScorer(roi_indices, baseline_activation=0.5)

    # Simulate 30 seconds of predictions
    predictions = np.random.randn(30, 500) * 0.5
    # Add high visual activation
    predictions[:, 100:160] *= 3.0

    result = scorer.score_predictions(predictions, tr_seconds=1.0)
    print(f"Overall cognitive load: {result.overall_load:.2f}")
    print(f"Visual complexity: {result.visual_complexity:.2f}")
    print(f"Auditory demand: {result.auditory_demand:.2f}")
    print(f"Language processing: {result.language_processing:.2f}")
    print(f"Executive load: {result.executive_load:.2f}")
    print(f"Timeline points: {len(result.timeline)}")


if __name__ == "__main__":
    print("=== Brain-Alignment Benchmark ===")
    run_benchmark_demo()

    print("=== Cognitive Load Scorer ===")
    run_cognitive_load_demo()
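The `np.arange` ROI indices above are synthetic placeholders. A minimal sketch of swapping in real HCP MMP1.0 indices, assuming the atlas data used by the loader is available locally (`get_hcp_labels` is the same helper the cognitive load scorer uses internally; `brain_predictions` is the array from the example above):

from cortexlab.analysis import BrainAlignmentBenchmark
from cortexlab.data import get_hcp_labels

roi_indices = get_hcp_labels(mesh="fsaverage5")  # {ROI name: vertex index array}
bench = BrainAlignmentBenchmark(brain_predictions, roi_indices=roi_indices)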
pyproject.toml
ADDED
|
@@ -0,0 +1,85 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "cortexlab"
version = "0.1.0"
description = "Enhanced multimodal fMRI brain encoding toolkit built on TRIBE v2"
readme = "README.md"
requires-python = ">=3.10"
license = {file = "LICENSE"}
authors = [{name = "CortexLab Contributors"}]

dependencies = [
    "neuralset==0.0.2",
    "neuraltrain==0.0.2",
    "torch>=2.5.1,<2.7",
    "numpy>=2.0",
    "torchvision>=0.20,<0.22",
    "x_transformers==1.27.20",
    "einops",
    "pyyaml",
    "moviepy>=2.2.1",
    "huggingface_hub",
    "gtts",
    "langdetect",
    "spacy",
    "soundfile",
    "julius",
    "transformers",
    "pydantic",
    "exca",
    "requests",
    "tqdm",
    "pandas",
]

[project.optional-dependencies]
plotting = [
    "nibabel",
    "matplotlib",
    "seaborn",
    "colorcet",
    "nilearn",
    "scipy",
    "pyvista",
    "scikit-image",
    "mne",
]
training = [
    "nibabel",
    "torchmetrics",
    "wandb",
    "lightning",
]
streaming = [
    "av",
]
analysis = [
    "scipy",
]
dev = [
    "pytest",
    "pytest-cov",
    "ruff",
]

[project.urls]
Repository = "https://github.com/siddhant-rajhans/cortexlab"

[tool.setuptools.packages.find]
where = ["src"]
include = ["cortexlab*"]

[tool.ruff]
line-length = 100
target-version = "py310"

[tool.ruff.lint]
select = ["E", "F", "I", "W"]
ignore = ["E501", "F401", "F403", "E402"]

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = "test_*.py"
src/cortexlab/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
"""CortexLab - Enhanced multimodal fMRI brain encoding toolkit.

Built on Meta's TRIBE v2 foundation model, CortexLab adds streaming
inference, modality attribution, cross-subject adaptation,
brain-alignment benchmarking, and cognitive load scoring.
"""

__version__ = "0.1.0"

from cortexlab.core.model import FmriEncoder, FmriEncoderModel
from cortexlab.core.attention import AttentionExtractor, attention_to_roi_scores
from cortexlab.core.subject import SubjectAdapter

__all__ = [
    "FmriEncoder",
    "FmriEncoderModel",
    "AttentionExtractor",
    "attention_to_roi_scores",
    "SubjectAdapter",
]
src/cortexlab/analysis/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark
from cortexlab.analysis.cognitive_load import CognitiveLoadScorer

__all__ = ["BrainAlignmentBenchmark", "CognitiveLoadScorer"]
src/cortexlab/analysis/brain_alignment.py
ADDED
|
@@ -0,0 +1,223 @@
| 1 |
+
"""Brain-alignment benchmark for comparing AI model representations.
|
| 2 |
+
|
| 3 |
+
Score how "brain-like" any AI model's internal representations are by
|
| 4 |
+
comparing them against TRIBE v2's predicted brain responses using
|
| 5 |
+
Representational Similarity Analysis (RSA), Centered Kernel
|
| 6 |
+
Alignment (CKA), or Procrustes analysis.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
from dataclasses import dataclass, field
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@dataclass
|
| 21 |
+
class AlignmentResult:
|
| 22 |
+
"""Results from a brain-alignment benchmark run."""
|
| 23 |
+
|
| 24 |
+
method: str
|
| 25 |
+
aggregate_score: float
|
| 26 |
+
roi_scores: dict[str, float] = field(default_factory=dict)
|
| 27 |
+
n_stimuli: int = 0
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _compute_rdm(features: np.ndarray) -> np.ndarray:
|
| 31 |
+
"""Compute a representational dissimilarity matrix (1 - cosine sim)."""
|
| 32 |
+
norms = np.linalg.norm(features, axis=1, keepdims=True)
|
| 33 |
+
norms = np.where(norms > 0, norms, 1.0)
|
| 34 |
+
normalised = features / norms
|
| 35 |
+
sim = normalised @ normalised.T
|
| 36 |
+
return 1.0 - sim
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _rsa_score(model_features: np.ndarray, brain_features: np.ndarray) -> float:
|
| 40 |
+
"""Representational Similarity Analysis via Spearman correlation of RDMs."""
|
| 41 |
+
from scipy.stats import spearmanr
|
| 42 |
+
|
| 43 |
+
rdm_model = _compute_rdm(model_features)
|
| 44 |
+
rdm_brain = _compute_rdm(brain_features)
|
| 45 |
+
# Extract upper triangle (excluding diagonal)
|
| 46 |
+
idx = np.triu_indices(rdm_model.shape[0], k=1)
|
| 47 |
+
corr, _ = spearmanr(rdm_model[idx], rdm_brain[idx])
|
| 48 |
+
return float(corr) if not np.isnan(corr) else 0.0
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _linear_cka(X: np.ndarray, Y: np.ndarray) -> float:
|
| 52 |
+
"""Compute linear Centered Kernel Alignment between two feature matrices.
|
| 53 |
+
|
| 54 |
+
CKA uses Gram matrices (n x n) so it naturally handles different
|
| 55 |
+
feature dimensions without truncation.
|
| 56 |
+
"""
|
| 57 |
+
n = X.shape[0]
|
| 58 |
+
# Centre
|
| 59 |
+
X = X - X.mean(axis=0)
|
| 60 |
+
Y = Y - Y.mean(axis=0)
|
| 61 |
+
# Gram matrices (n x n, dimension-independent)
|
| 62 |
+
XX = X @ X.T # (n, n)
|
| 63 |
+
YY = Y @ Y.T # (n, n)
|
| 64 |
+
# HSIC via Gram matrices - works regardless of feature dimensions
|
| 65 |
+
hsic_xy = np.trace(XX @ YY) / (n - 1) ** 2
|
| 66 |
+
hsic_xx = np.trace(XX @ XX) / (n - 1) ** 2
|
| 67 |
+
hsic_yy = np.trace(YY @ YY) / (n - 1) ** 2
|
| 68 |
+
denom = np.sqrt(hsic_xx * hsic_yy)
|
| 69 |
+
if denom < 1e-12:
|
| 70 |
+
return 0.0
|
| 71 |
+
return float(hsic_xy / denom)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _procrustes_score(X: np.ndarray, Y: np.ndarray) -> float:
|
| 75 |
+
"""Procrustes analysis: rotation-invariant shape comparison.
|
| 76 |
+
|
| 77 |
+
Works with different feature dimensions by truncating to the
|
| 78 |
+
smaller dimension.
|
| 79 |
+
"""
|
| 80 |
+
# Match dimensions by truncating to min(d_x, d_y)
|
| 81 |
+
min_dim = min(X.shape[1], Y.shape[1])
|
| 82 |
+
X = X[:, :min_dim]
|
| 83 |
+
Y = Y[:, :min_dim]
|
| 84 |
+
# Centre and scale
|
| 85 |
+
X = X - X.mean(axis=0)
|
| 86 |
+
Y = Y - Y.mean(axis=0)
|
| 87 |
+
norm_x = np.linalg.norm(X)
|
| 88 |
+
norm_y = np.linalg.norm(Y)
|
| 89 |
+
if norm_x < 1e-12 or norm_y < 1e-12:
|
| 90 |
+
return 0.0
|
| 91 |
+
X = X / norm_x
|
| 92 |
+
Y = Y / norm_y
|
| 93 |
+
# Optimal rotation via SVD
|
| 94 |
+
M = Y.T @ X
|
| 95 |
+
U, _, Vt = np.linalg.svd(M, full_matrices=False)
|
| 96 |
+
R = U @ Vt
|
| 97 |
+
rotated = Y @ R
|
| 98 |
+
# Score = 1 - normalized Procrustes distance
|
| 99 |
+
dist = np.linalg.norm(X - rotated)
|
| 100 |
+
return float(max(0.0, 1.0 - dist))
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
_METHODS = {
|
| 104 |
+
"rsa": _rsa_score,
|
| 105 |
+
"cka": _linear_cka,
|
| 106 |
+
"procrustes": _procrustes_score,
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class BrainAlignmentBenchmark:
|
| 111 |
+
"""Benchmark AI model representations against predicted brain responses.
|
| 112 |
+
|
| 113 |
+
Example
|
| 114 |
+
-------
|
| 115 |
+
>>> bench = BrainAlignmentBenchmark(brain_predictions)
|
| 116 |
+
>>> result = bench.score_model(clip_features, method="rsa")
|
| 117 |
+
>>> print(result.aggregate_score)
|
| 118 |
+
0.42
|
| 119 |
+
"""
|
| 120 |
+
|
| 121 |
+
def __init__(
|
| 122 |
+
self,
|
| 123 |
+
brain_predictions: np.ndarray,
|
| 124 |
+
roi_indices: dict[str, np.ndarray] | None = None,
|
| 125 |
+
):
|
| 126 |
+
"""
|
| 127 |
+
Parameters
|
| 128 |
+
----------
|
| 129 |
+
brain_predictions : np.ndarray
|
| 130 |
+
Array of shape ``(n_stimuli, n_vertices)`` with predicted
|
| 131 |
+
fMRI responses from TRIBE v2 for a set of stimuli.
|
| 132 |
+
roi_indices : dict[str, np.ndarray], optional
|
| 133 |
+
HCP ROI name to vertex index mapping for per-ROI scoring.
|
| 134 |
+
"""
|
| 135 |
+
self.brain_predictions = brain_predictions
|
| 136 |
+
self.roi_indices = roi_indices
|
| 137 |
+
|
| 138 |
+
@classmethod
|
| 139 |
+
def from_pretrained(
|
| 140 |
+
cls,
|
| 141 |
+
checkpoint_dir: str = "facebook/tribev2",
|
| 142 |
+
roi_indices: dict[str, np.ndarray] | None = None,
|
| 143 |
+
**kwargs,
|
| 144 |
+
) -> BrainAlignmentBenchmark:
|
| 145 |
+
"""Create a benchmark instance with a loaded TRIBE v2 model.
|
| 146 |
+
|
| 147 |
+
The model is stored so you can call :meth:`score_model_with_stimuli`
|
| 148 |
+
to generate brain predictions on the fly.
|
| 149 |
+
"""
|
| 150 |
+
instance = cls(brain_predictions=np.array([]), roi_indices=roi_indices)
|
| 151 |
+
instance._checkpoint_dir = checkpoint_dir
|
| 152 |
+
instance._model_kwargs = kwargs
|
| 153 |
+
instance._model = None
|
| 154 |
+
return instance
|
| 155 |
+
|
| 156 |
+
def _ensure_model(self):
|
| 157 |
+
if self._model is None:
|
| 158 |
+
from cortexlab.inference.predictor import TribeModel
|
| 159 |
+
|
| 160 |
+
self._model = TribeModel.from_pretrained(
|
| 161 |
+
self._checkpoint_dir, **self._model_kwargs
|
| 162 |
+
)
|
| 163 |
+
|
| 164 |
+
def score_model(
|
| 165 |
+
self,
|
| 166 |
+
model_features: np.ndarray,
|
| 167 |
+
method: str = "rsa",
|
| 168 |
+
roi_filter: list[str] | None = None,
|
| 169 |
+
brain_predictions: np.ndarray | None = None,
|
| 170 |
+
) -> AlignmentResult:
|
| 171 |
+
"""Score how brain-aligned a set of model features are.
|
| 172 |
+
|
| 173 |
+
Parameters
|
| 174 |
+
----------
|
| 175 |
+
model_features : np.ndarray
|
| 176 |
+
Feature matrix of shape ``(n_stimuli, D)`` extracted from
|
| 177 |
+
any AI model for the same stimuli used to generate the
|
| 178 |
+
brain predictions.
|
| 179 |
+
method : str
|
| 180 |
+
Comparison method: ``"rsa"``, ``"cka"``, or ``"procrustes"``.
|
| 181 |
+
roi_filter : list[str], optional
|
| 182 |
+
If set, only compute alignment for these ROIs.
|
| 183 |
+
brain_predictions : np.ndarray, optional
|
| 184 |
+
Override the stored brain predictions.
|
| 185 |
+
|
| 186 |
+
Returns
|
| 187 |
+
-------
|
| 188 |
+
AlignmentResult
|
| 189 |
+
"""
|
| 190 |
+
if method not in _METHODS:
|
| 191 |
+
raise ValueError(f"Unknown method {method!r}. Choose from {list(_METHODS)}")
|
| 192 |
+
|
| 193 |
+
brain = brain_predictions if brain_predictions is not None else self.brain_predictions
|
| 194 |
+
score_fn = _METHODS[method]
|
| 195 |
+
|
| 196 |
+
if model_features.shape[0] != brain.shape[0]:
|
| 197 |
+
raise ValueError(
|
| 198 |
+
f"Stimulus count mismatch: model has {model_features.shape[0]}, "
|
| 199 |
+
f"brain has {brain.shape[0]}"
|
| 200 |
+
)
|
| 201 |
+
|
| 202 |
+
# Aggregate score (full vertex space)
|
| 203 |
+
aggregate = score_fn(model_features, brain)
|
| 204 |
+
|
| 205 |
+
# Per-ROI scores
|
| 206 |
+
roi_scores = {}
|
| 207 |
+
if self.roi_indices is not None:
|
| 208 |
+
rois = self.roi_indices
|
| 209 |
+
if roi_filter:
|
| 210 |
+
rois = {k: v for k, v in rois.items() if k in roi_filter}
|
| 211 |
+
for name, vertices in rois.items():
|
| 212 |
+
valid = vertices[vertices < brain.shape[1]]
|
| 213 |
+
if len(valid) < 2:
|
| 214 |
+
continue
|
| 215 |
+
roi_brain = brain[:, valid]
|
| 216 |
+
roi_scores[name] = score_fn(model_features, roi_brain)
|
| 217 |
+
|
| 218 |
+
return AlignmentResult(
|
| 219 |
+
method=method,
|
| 220 |
+
aggregate_score=aggregate,
|
| 221 |
+
roi_scores=roi_scores,
|
| 222 |
+
n_stimuli=model_features.shape[0],
|
| 223 |
+
)
|
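Not part of the repository files: a quick sanity check of the property `_linear_cka` relies on. Because the score is computed from n x n Gram matrices, representations with different dimensionalities, or the same geometry expressed in a rotated basis, compare without any truncation; the rotated copy scores exactly 1.0.

import numpy as np
from cortexlab.analysis.brain_alignment import _linear_cka

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 768))                    # e.g. model features for 50 stimuli
Q, _ = np.linalg.qr(rng.standard_normal((768, 768)))  # random orthogonal rotation
print(_linear_cka(X, X @ Q))                          # ~1.0: CKA ignores the rotation
print(_linear_cka(X, rng.standard_normal((50, 128)))) # lower score for unrelated features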
src/cortexlab/analysis/cognitive_load.py
ADDED
|
@@ -0,0 +1,276 @@
| 1 |
+
"""Cognitive load scoring from predicted brain activation patterns.
|
| 2 |
+
|
| 3 |
+
Maps TRIBE v2's predicted fMRI responses onto cognitive dimensions
|
| 4 |
+
using established HCP MMP1.0 ROI groupings associated with different
|
| 5 |
+
cognitive functions.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import logging
|
| 11 |
+
from dataclasses import dataclass, field
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
# HCP MMP1.0 ROI groupings for cognitive dimensions.
|
| 18 |
+
# Each dimension maps to ROIs known to be involved in that function.
|
| 19 |
+
COGNITIVE_ROI_MAP: dict[str, list[str]] = {
|
| 20 |
+
"executive_load": [
|
| 21 |
+
# Dorsolateral prefrontal cortex
|
| 22 |
+
"46",
|
| 23 |
+
"9-46d",
|
| 24 |
+
"p9-46v",
|
| 25 |
+
"a9-46v",
|
| 26 |
+
"9a",
|
| 27 |
+
"8Av",
|
| 28 |
+
"8Ad",
|
| 29 |
+
"8BL",
|
| 30 |
+
"8C",
|
| 31 |
+
# Anterior cingulate cortex
|
| 32 |
+
"p32pr",
|
| 33 |
+
"a32pr",
|
| 34 |
+
"d32",
|
| 35 |
+
"p24",
|
| 36 |
+
"a24",
|
| 37 |
+
# Frontal eye fields
|
| 38 |
+
"FEF",
|
| 39 |
+
"PEF",
|
| 40 |
+
],
|
| 41 |
+
"visual_complexity": [
|
| 42 |
+
# Early visual
|
| 43 |
+
"V1",
|
| 44 |
+
"V2",
|
| 45 |
+
"V3",
|
| 46 |
+
"V4",
|
| 47 |
+
# Ventral stream (object recognition)
|
| 48 |
+
"FFC",
|
| 49 |
+
"VVC",
|
| 50 |
+
"VMV1",
|
| 51 |
+
"VMV2",
|
| 52 |
+
"VMV3",
|
| 53 |
+
# Fusiform
|
| 54 |
+
"PHA1",
|
| 55 |
+
"PHA2",
|
| 56 |
+
"PHA3",
|
| 57 |
+
# Motion / dorsal
|
| 58 |
+
"V3A",
|
| 59 |
+
"V3B",
|
| 60 |
+
"V6",
|
| 61 |
+
"V6A",
|
| 62 |
+
"V7",
|
| 63 |
+
"MT",
|
| 64 |
+
"MST",
|
| 65 |
+
"FST",
|
| 66 |
+
"V4t",
|
| 67 |
+
],
|
| 68 |
+
"auditory_demand": [
|
| 69 |
+
# Primary auditory
|
| 70 |
+
"A1",
|
| 71 |
+
"LBelt",
|
| 72 |
+
"MBelt",
|
| 73 |
+
"PBelt",
|
| 74 |
+
"RI",
|
| 75 |
+
# Auditory association
|
| 76 |
+
"A4",
|
| 77 |
+
"A5",
|
| 78 |
+
"STSdp",
|
| 79 |
+
"STSda",
|
| 80 |
+
"STSvp",
|
| 81 |
+
"STSva",
|
| 82 |
+
"TA2",
|
| 83 |
+
],
|
| 84 |
+
"language_processing": [
|
| 85 |
+
# Broca's area (inferior frontal)
|
| 86 |
+
"44",
|
| 87 |
+
"45",
|
| 88 |
+
"IFJa",
|
| 89 |
+
"IFJp",
|
| 90 |
+
"IFSp",
|
| 91 |
+
"IFSa",
|
| 92 |
+
# Wernicke's area (posterior temporal)
|
| 93 |
+
"TPOJ1",
|
| 94 |
+
"TPOJ2",
|
| 95 |
+
"TPOJ3",
|
| 96 |
+
"STV",
|
| 97 |
+
"PSL",
|
| 98 |
+
# Angular gyrus / semantic
|
| 99 |
+
"PGi",
|
| 100 |
+
"PGs",
|
| 101 |
+
"PFm",
|
| 102 |
+
# Temporal pole
|
| 103 |
+
"TGd",
|
| 104 |
+
"TGv",
|
| 105 |
+
"TE1a",
|
| 106 |
+
"TE1p",
|
| 107 |
+
"TE2a",
|
| 108 |
+
"TE2p",
|
| 109 |
+
],
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
@dataclass
|
| 114 |
+
class CognitiveLoadResult:
|
| 115 |
+
"""Result of cognitive load scoring."""
|
| 116 |
+
|
| 117 |
+
overall_load: float
|
| 118 |
+
visual_complexity: float
|
| 119 |
+
auditory_demand: float
|
| 120 |
+
language_processing: float
|
| 121 |
+
executive_load: float
|
| 122 |
+
timeline: list[tuple[float, dict[str, float]]] = field(default_factory=list)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class CognitiveLoadScorer:
|
| 126 |
+
"""Predict cognitive demand of media content from brain activation patterns.
|
| 127 |
+
|
| 128 |
+
Uses predicted fMRI responses from TRIBE v2 and maps them onto
|
| 129 |
+
cognitive dimensions via HCP MMP1.0 ROI groupings.
|
| 130 |
+
|
| 131 |
+
Example
|
| 132 |
+
-------
|
| 133 |
+
>>> scorer = CognitiveLoadScorer(roi_indices)
|
| 134 |
+
>>> result = scorer.score_predictions(predictions)
|
| 135 |
+
>>> print(f"Overall load: {result.overall_load:.2f}")
|
| 136 |
+
"""
|
| 137 |
+
|
| 138 |
+
def __init__(
|
| 139 |
+
self,
|
| 140 |
+
roi_indices: dict[str, np.ndarray],
|
| 141 |
+
cognitive_map: dict[str, list[str]] | None = None,
|
| 142 |
+
baseline_activation: float | None = None,
|
| 143 |
+
):
|
| 144 |
+
"""
|
| 145 |
+
Parameters
|
| 146 |
+
----------
|
| 147 |
+
roi_indices : dict[str, np.ndarray]
|
| 148 |
+
HCP ROI name to vertex index mapping (from ``get_hcp_labels``).
|
| 149 |
+
cognitive_map : dict[str, list[str]], optional
|
| 150 |
+
Override the default cognitive ROI groupings.
|
| 151 |
+
baseline_activation : float, optional
|
| 152 |
+
Baseline activation level for normalisation. If None, uses
|
| 153 |
+
the median activation across all vertices as baseline.
|
| 154 |
+
"""
|
| 155 |
+
self.roi_indices = roi_indices
|
| 156 |
+
self.cognitive_map = cognitive_map or COGNITIVE_ROI_MAP
|
| 157 |
+
self.baseline = baseline_activation
|
| 158 |
+
|
| 159 |
+
@classmethod
|
| 160 |
+
def from_pretrained(
|
| 161 |
+
cls,
|
| 162 |
+
checkpoint_dir: str = "facebook/tribev2",
|
| 163 |
+
**kwargs,
|
| 164 |
+
) -> CognitiveLoadScorer:
|
| 165 |
+
"""Create a scorer with a loaded TRIBE v2 model.
|
| 166 |
+
|
| 167 |
+
Lazily loads the model and ROI indices on first use.
|
| 168 |
+
"""
|
| 169 |
+
instance = cls.__new__(cls)
|
| 170 |
+
instance._checkpoint_dir = checkpoint_dir
|
| 171 |
+
instance._model_kwargs = kwargs
|
| 172 |
+
instance._model = None
|
| 173 |
+
instance.cognitive_map = COGNITIVE_ROI_MAP
|
| 174 |
+
instance.baseline = None
|
| 175 |
+
instance.roi_indices = None
|
| 176 |
+
return instance
|
| 177 |
+
|
| 178 |
+
def _ensure_model(self):
|
| 179 |
+
if self._model is None:
|
| 180 |
+
from cortexlab.data.loader import get_hcp_labels
|
| 181 |
+
from cortexlab.inference.predictor import TribeModel
|
| 182 |
+
|
| 183 |
+
self.roi_indices = get_hcp_labels(mesh="fsaverage5")
|
| 184 |
+
self._model = TribeModel.from_pretrained(
|
| 185 |
+
self._checkpoint_dir, **self._model_kwargs
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
def _get_dimension_activation(
|
| 189 |
+
self, vertex_data: np.ndarray, dimension: str
|
| 190 |
+
) -> float:
|
| 191 |
+
"""Compute mean activation for a cognitive dimension."""
|
| 192 |
+
roi_names = self.cognitive_map.get(dimension, [])
|
| 193 |
+
activations = []
|
| 194 |
+
for roi in roi_names:
|
| 195 |
+
vertices = self.roi_indices.get(roi)
|
| 196 |
+
if vertices is None:
|
| 197 |
+
continue
|
| 198 |
+
valid = vertices[vertices < len(vertex_data)]
|
| 199 |
+
if len(valid) > 0:
|
| 200 |
+
activations.append(np.abs(vertex_data[valid]).mean())
|
| 201 |
+
if not activations:
|
| 202 |
+
return 0.0
|
| 203 |
+
return float(np.mean(activations))
|
| 204 |
+
|
| 205 |
+
def score_predictions(
|
| 206 |
+
self,
|
| 207 |
+
predictions: np.ndarray,
|
| 208 |
+
tr_seconds: float = 1.0,
|
| 209 |
+
) -> CognitiveLoadResult:
|
| 210 |
+
"""Score predicted brain activations for cognitive load.
|
| 211 |
+
|
| 212 |
+
Parameters
|
| 213 |
+
----------
|
| 214 |
+
predictions : np.ndarray
|
| 215 |
+
Predicted brain activations of shape ``(n_timepoints, n_vertices)``.
|
| 216 |
+
tr_seconds : float
|
| 217 |
+
Duration of each TR in seconds (for timeline).
|
| 218 |
+
|
| 219 |
+
Returns
|
| 220 |
+
-------
|
| 221 |
+
CognitiveLoadResult
|
| 222 |
+
"""
|
| 223 |
+
if predictions.ndim == 1:
|
| 224 |
+
predictions = predictions[np.newaxis, :]
|
| 225 |
+
|
| 226 |
+
baseline = self.baseline
|
| 227 |
+
if baseline is None:
|
| 228 |
+
baseline = float(np.median(np.abs(predictions)))
|
| 229 |
+
baseline = max(baseline, 1e-8)
|
| 230 |
+
|
| 231 |
+
dimensions = list(self.cognitive_map.keys())
|
| 232 |
+
dim_scores = {d: [] for d in dimensions}
|
| 233 |
+
timeline = []
|
| 234 |
+
|
| 235 |
+
for t in range(predictions.shape[0]):
|
| 236 |
+
vertex_data = predictions[t]
|
| 237 |
+
t_scores = {}
|
| 238 |
+
for dim in dimensions:
|
| 239 |
+
raw = self._get_dimension_activation(vertex_data, dim)
|
| 240 |
+
normalised = min(raw / baseline, 1.0) if baseline > 0 else 0.0
|
| 241 |
+
dim_scores[dim].append(normalised)
|
| 242 |
+
t_scores[dim] = normalised
|
| 243 |
+
timeline.append((t * tr_seconds, t_scores))
|
| 244 |
+
|
| 245 |
+
avg_scores = {d: float(np.mean(v)) if v else 0.0 for d, v in dim_scores.items()}
|
| 246 |
+
overall = float(np.mean(list(avg_scores.values()))) if avg_scores else 0.0
|
| 247 |
+
|
| 248 |
+
return CognitiveLoadResult(
|
| 249 |
+
overall_load=overall,
|
| 250 |
+
visual_complexity=avg_scores.get("visual_complexity", 0.0),
|
| 251 |
+
auditory_demand=avg_scores.get("auditory_demand", 0.0),
|
| 252 |
+
language_processing=avg_scores.get("language_processing", 0.0),
|
| 253 |
+
executive_load=avg_scores.get("executive_load", 0.0),
|
| 254 |
+
timeline=timeline,
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
def score(self, video_path: str | None = None, audio_path: str | None = None, text_path: str | None = None) -> CognitiveLoadResult:
|
| 258 |
+
"""End-to-end scoring from a media file.
|
| 259 |
+
|
| 260 |
+
Loads the model, runs inference, and scores the predictions.
|
| 261 |
+
At least one path argument must be provided; if several are given, the first in the order video, audio, text is used.
|
| 262 |
+
"""
|
| 263 |
+
self._ensure_model()
|
| 264 |
+
kwargs = {}
|
| 265 |
+
if video_path:
|
| 266 |
+
kwargs["video_path"] = video_path
|
| 267 |
+
elif audio_path:
|
| 268 |
+
kwargs["audio_path"] = audio_path
|
| 269 |
+
elif text_path:
|
| 270 |
+
kwargs["text_path"] = text_path
|
| 271 |
+
else:
|
| 272 |
+
raise ValueError("Provide one of video_path, audio_path, or text_path")
|
| 273 |
+
|
| 274 |
+
events = self._model.get_events_dataframe(**kwargs)
|
| 275 |
+
predictions, _ = self._model.predict(events, verbose=False)
|
| 276 |
+
return self.score_predictions(predictions)
|
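Not part of the repository files: a minimal sketch of plotting the per-dimension timeline returned by `score_predictions`, assuming the optional plotting extra (matplotlib) is installed. The ROI indices and predictions are synthetic, as in the quick-start example.

import matplotlib.pyplot as plt
import numpy as np
from cortexlab.analysis import CognitiveLoadScorer

roi_indices = {"V1": np.arange(0, 20), "A1": np.arange(20, 40), "44": np.arange(40, 50), "46": np.arange(50, 60)}
scorer = CognitiveLoadScorer(roi_indices, baseline_activation=0.5)
result = scorer.score_predictions(np.random.randn(60, 100) * 0.5, tr_seconds=1.5)

times = [t for t, _ in result.timeline]
for dim in ["visual_complexity", "auditory_demand", "language_processing", "executive_load"]:
    plt.plot(times, [scores[dim] for _, scores in result.timeline], label=dim)
plt.xlabel("time (s)")
plt.ylabel("normalised load")
plt.legend()
plt.show()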
src/cortexlab/core/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
from cortexlab.core.model import FmriEncoder, FmriEncoderModel, TemporalSmoothing

__all__ = ["FmriEncoder", "FmriEncoderModel", "TemporalSmoothing"]
src/cortexlab/core/attention.py
ADDED
|
@@ -0,0 +1,117 @@
| 1 |
+
"""ROI attention map extraction from the transformer encoder.
|
| 2 |
+
|
| 3 |
+
Provides :func:`AttentionExtractor`, a context manager that hooks into
|
| 4 |
+
the encoder's attention layers and captures per-head attention weights
|
| 5 |
+
during a forward pass. The raw maps can then be projected onto HCP
|
| 6 |
+
MMP1.0 brain ROIs via :func:`attention_to_roi_scores`.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
from contextlib import contextmanager
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@contextmanager
|
| 22 |
+
def AttentionExtractor(encoder: nn.Module):
|
| 23 |
+
"""Context manager that captures attention weights from transformer layers.
|
| 24 |
+
|
| 25 |
+
Registers forward hooks on every sub-module whose class name contains
|
| 26 |
+
``"Attention"`` (case-insensitive). Each hook stores the second element
|
| 27 |
+
of the output tuple (the attention weights) if the layer returns one,
|
| 28 |
+
or falls back to a ``_attn_weights`` attribute when present.
|
| 29 |
+
|
| 30 |
+
Yields
|
| 31 |
+
------
|
| 32 |
+
attn_maps : list[torch.Tensor]
|
| 33 |
+
Mutable list that accumulates attention tensors of shape
|
| 34 |
+
``(B, heads, T, T)`` as the model runs its forward pass.
|
| 35 |
+
|
| 36 |
+
Example
|
| 37 |
+
-------
|
| 38 |
+
>>> with AttentionExtractor(model.encoder) as maps:
|
| 39 |
+
... out = model(batch)
|
| 40 |
+
>>> len(maps) # one tensor per attention layer
|
| 41 |
+
8
|
| 42 |
+
"""
|
| 43 |
+
attn_maps: list[torch.Tensor] = []
|
| 44 |
+
hooks = []
|
| 45 |
+
|
| 46 |
+
for module in encoder.modules():
|
| 47 |
+
if "attention" in module.__class__.__name__.lower():
|
| 48 |
+
|
| 49 |
+
def _hook(mod, inp, out, store=attn_maps):
|
| 50 |
+
if isinstance(out, tuple) and len(out) >= 2:
|
| 51 |
+
second = out[1]
|
| 52 |
+
if second is not None:
|
| 53 |
+
if hasattr(second, "post_softmax_attn") and second.post_softmax_attn is not None:
|
| 54 |
+
store.append(second.post_softmax_attn.detach())
|
| 55 |
+
elif hasattr(second, "detach"):
|
| 56 |
+
store.append(second.detach())
|
| 57 |
+
elif hasattr(mod, "_attn_weights") and mod._attn_weights is not None:
|
| 58 |
+
store.append(mod._attn_weights.detach())
|
| 59 |
+
|
| 60 |
+
hooks.append(module.register_forward_hook(_hook))
|
| 61 |
+
|
| 62 |
+
try:
|
| 63 |
+
yield attn_maps
|
| 64 |
+
finally:
|
| 65 |
+
for h in hooks:
|
| 66 |
+
h.remove()
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def attention_to_roi_scores(
|
| 70 |
+
attn_maps: list[torch.Tensor],
|
| 71 |
+
roi_indices: dict[str, np.ndarray],
|
| 72 |
+
predictor_weights: torch.Tensor | None = None,
|
| 73 |
+
) -> dict[str, np.ndarray]:
|
| 74 |
+
"""Project raw attention maps onto brain ROIs.
|
| 75 |
+
|
| 76 |
+
Parameters
|
| 77 |
+
----------
|
| 78 |
+
attn_maps : list[torch.Tensor]
|
| 79 |
+
Attention tensors from :class:`AttentionExtractor`, each of shape
|
| 80 |
+
``(B, heads, T, T)``.
|
| 81 |
+
roi_indices : dict[str, np.ndarray]
|
| 82 |
+
Mapping from ROI name to vertex indices (e.g. from
|
| 83 |
+
:func:`cortexlab.data.loader.get_hcp_labels`).
|
| 84 |
+
predictor_weights : torch.Tensor, optional
|
| 85 |
+
Predictor layer weights of shape ``(n_subjects, hidden, n_vertices)``
|
| 86 |
+
or ``(hidden, n_vertices)``. When provided, each ROI's temporal
|
| 87 |
+
profile is weighted by the L2 norm of the predictor weights for
|
| 88 |
+
its vertices, emphasising ROIs the model attends to more strongly.
|
| 89 |
+
|
| 90 |
+
Returns
|
| 91 |
+
-------
|
| 92 |
+
dict[str, np.ndarray]
|
| 93 |
+
Mapping from ROI name to a 1-D temporal importance array of
|
| 94 |
+
length ``T`` (number of time steps in the attention maps).
|
| 95 |
+
"""
|
| 96 |
+
if not attn_maps:
|
| 97 |
+
return {name: np.array([]) for name in roi_indices}
|
| 98 |
+
|
| 99 |
+
# Stack layers, average over batch and heads -> (T, T)
|
| 100 |
+
stacked = torch.stack(attn_maps) # (layers, B, heads, T, T)
|
| 101 |
+
avg_attn = stacked.mean(dim=(0, 1, 2)) # (T, T)
|
| 102 |
+
# Per-timestep importance: sum over keys for each query position
|
| 103 |
+
temporal_importance = avg_attn.sum(dim=-1).cpu().numpy() # (T,)
|
| 104 |
+
|
| 105 |
+
roi_scores: dict[str, np.ndarray] = {}
|
| 106 |
+
for name, vertices in roi_indices.items():
|
| 107 |
+
if predictor_weights is not None:
|
| 108 |
+
w = predictor_weights
|
| 109 |
+
if w.ndim == 3:
|
| 110 |
+
w = w.mean(dim=0) # average across subjects
|
| 111 |
+
# Weight = L2 norm of predictor weights for this ROI's vertices
|
| 112 |
+
roi_weight = w[:, vertices].norm(dim=0).mean().item()
|
| 113 |
+
else:
|
| 114 |
+
roi_weight = 1.0
|
| 115 |
+
roi_scores[name] = temporal_importance * roi_weight
|
| 116 |
+
|
| 117 |
+
return roi_scores
|
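Not part of the repository files: a sketch tying the two helpers above together. `model`, `batch`, and `roi_indices` are placeholders for a built `FmriEncoderModel`, a prepared `SegmentData` batch, and an HCP ROI mapping such as the one returned by `get_hcp_labels`.

import torch
from cortexlab.core.attention import AttentionExtractor, attention_to_roi_scores

with torch.inference_mode(), AttentionExtractor(model.encoder) as attn_maps:
    model(batch)  # hooks collect per-layer attention during this forward pass

roi_scores = attention_to_roi_scores(
    attn_maps, roi_indices, predictor_weights=model.predictor.weights
)
print(len(attn_maps), "attention layers captured")
print(roi_scores["V1"].shape)  # (T,) temporal importance profile for V1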
src/cortexlab/core/model.py
ADDED
|
@@ -0,0 +1,288 @@
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
#
|
| 7 |
+
# Modified by CortexLab contributors: added return_attn support and
|
| 8 |
+
# compile_backbone option.
|
| 9 |
+
|
| 10 |
+
import logging
|
| 11 |
+
import typing as tp
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from einops import rearrange
|
| 15 |
+
from neuralset.dataloader import SegmentData
|
| 16 |
+
from neuraltrain.models.base import BaseModelConfig
|
| 17 |
+
from neuraltrain.models.common import Mlp, SubjectLayers, SubjectLayersModel
|
| 18 |
+
from neuraltrain.models.transformer import TransformerEncoder
|
| 19 |
+
from torch import nn
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class TemporalSmoothing(BaseModelConfig):
|
| 25 |
+
kernel_size: int = 9
|
| 26 |
+
sigma: float | None = None
|
| 27 |
+
|
| 28 |
+
def build(self, dim: int) -> nn.Module:
|
| 29 |
+
|
| 30 |
+
def gaussian_kernel_1d(kernel_size: int, sigma: float):
|
| 31 |
+
x = torch.arange(kernel_size) - kernel_size // 2
|
| 32 |
+
kernel = torch.exp(-0.5 * (x / sigma) ** 2)
|
| 33 |
+
kernel = kernel / kernel.sum()
|
| 34 |
+
return kernel.view(1, 1, -1)
|
| 35 |
+
|
| 36 |
+
conv = nn.Conv1d(
|
| 37 |
+
dim,
|
| 38 |
+
dim,
|
| 39 |
+
kernel_size=self.kernel_size,
|
| 40 |
+
padding=self.kernel_size // 2,
|
| 41 |
+
bias=False,
|
| 42 |
+
groups=dim,
|
| 43 |
+
)
|
| 44 |
+
if self.sigma is not None:
|
| 45 |
+
kernel = gaussian_kernel_1d(kernel_size=self.kernel_size, sigma=self.sigma)
|
| 46 |
+
kernel = kernel.repeat(dim, 1, 1)
|
| 47 |
+
conv.weight.data = kernel
|
| 48 |
+
conv.weight.requires_grad = False
|
| 49 |
+
return conv
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class FmriEncoder(BaseModelConfig):
|
| 53 |
+
|
| 54 |
+
# architecture
|
| 55 |
+
projector: BaseModelConfig = Mlp(norm_layer="layer", activation_layer="gelu")
|
| 56 |
+
combiner: Mlp | None = Mlp(norm_layer="layer", activation_layer="gelu")
|
| 57 |
+
encoder: TransformerEncoder | None = TransformerEncoder()
|
| 58 |
+
# other hyperparameters
|
| 59 |
+
time_pos_embedding: bool = True
|
| 60 |
+
subject_embedding: bool = False
|
| 61 |
+
subject_layers: SubjectLayers | None = SubjectLayers()
|
| 62 |
+
hidden: int = 256
|
| 63 |
+
max_seq_len: int = 1024
|
| 64 |
+
dropout: float = 0.0
|
| 65 |
+
extractor_aggregation: tp.Literal["stack", "sum", "cat"] = "cat"
|
| 66 |
+
layer_aggregation: tp.Literal["mean", "cat"] = "cat"
|
| 67 |
+
linear_baseline: bool = False
|
| 68 |
+
modality_dropout: float = 0.0
|
| 69 |
+
temporal_dropout: float = 0.0
|
| 70 |
+
low_rank_head: int | None = None
|
| 71 |
+
temporal_smoothing: TemporalSmoothing | None = None
|
| 72 |
+
# CortexLab additions
|
| 73 |
+
compile_backbone: bool = False
|
| 74 |
+
|
| 75 |
+
def model_post_init(self, __context):
|
| 76 |
+
if self.encoder is not None:
|
| 77 |
+
for key in ["attn_dropout", "ff_dropout", "layer_dropout"]:
|
| 78 |
+
setattr(self.encoder, key, self.dropout)
|
| 79 |
+
if hasattr(self.projector, "dropout"):
|
| 80 |
+
self.projector.dropout = self.dropout
|
| 81 |
+
return super().model_post_init(__context)
|
| 82 |
+
|
| 83 |
+
def build(
|
| 84 |
+
self, feature_dims: dict[str, tuple[int, int]], n_outputs: int, n_output_timesteps: int
|
| 85 |
+
) -> nn.Module:
|
| 86 |
+
return FmriEncoderModel(
|
| 87 |
+
feature_dims,
|
| 88 |
+
n_outputs,
|
| 89 |
+
n_output_timesteps,
|
| 90 |
+
config=self,
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class FmriEncoderModel(nn.Module):
|
| 95 |
+
|
| 96 |
+
def __init__(
|
| 97 |
+
self,
|
| 98 |
+
feature_dims: dict[str, tuple[int, int]],
|
| 99 |
+
n_outputs: int,
|
| 100 |
+
n_output_timesteps: int,
|
| 101 |
+
config: FmriEncoder,
|
| 102 |
+
):
|
| 103 |
+
super().__init__()
|
| 104 |
+
self.config = config
|
| 105 |
+
self.feature_dims = feature_dims
|
| 106 |
+
self.n_outputs = n_outputs
|
| 107 |
+
self.n_output_timesteps = n_output_timesteps
|
| 108 |
+
self.projectors = nn.ModuleDict()
|
| 109 |
+
self.pooler = nn.AdaptiveAvgPool1d(n_output_timesteps)
|
| 110 |
+
hidden = config.hidden
|
| 111 |
+
for modality, tup in feature_dims.items():
|
| 112 |
+
if tup is None:
|
| 113 |
+
logger.warning(
|
| 114 |
+
"%s has no feature dimensions. Skipping projector.", modality
|
| 115 |
+
)
|
| 116 |
+
continue
|
| 117 |
+
else:
|
| 118 |
+
num_layers, feature_dim = tup
|
| 119 |
+
input_dim = (
|
| 120 |
+
feature_dim * num_layers
|
| 121 |
+
if config.layer_aggregation == "cat"
|
| 122 |
+
else feature_dim
|
| 123 |
+
)
|
| 124 |
+
output_dim = (
|
| 125 |
+
hidden // len(feature_dims)
|
| 126 |
+
if config.extractor_aggregation == "cat"
|
| 127 |
+
else hidden
|
| 128 |
+
)
|
| 129 |
+
self.projectors[modality] = self.config.projector.build(
|
| 130 |
+
input_dim, output_dim
|
| 131 |
+
)
|
| 132 |
+
input_dim = (
|
| 133 |
+
(hidden // len(feature_dims)) * len(feature_dims)
|
| 134 |
+
if config.extractor_aggregation == "cat"
|
| 135 |
+
else hidden
|
| 136 |
+
)
|
| 137 |
+
if self.config.combiner is not None:
|
| 138 |
+
self.combiner = self.config.combiner.build(input_dim, hidden)
|
| 139 |
+
else:
|
| 140 |
+
assert (
|
| 141 |
+
hidden % len(feature_dims) == 0
|
| 142 |
+
), "hidden must be divisible by the number of modalities if there is no combiner"
|
| 143 |
+
self.combiner = nn.Identity()
|
| 144 |
+
if config.low_rank_head is not None:
|
| 145 |
+
self.low_rank_head = nn.Linear(hidden, config.low_rank_head, bias=False)
|
| 146 |
+
bottleneck = config.low_rank_head
|
| 147 |
+
else:
|
| 148 |
+
bottleneck = hidden
|
| 149 |
+
self.predictor = config.subject_layers.build(
|
| 150 |
+
in_channels=bottleneck,
|
| 151 |
+
out_channels=n_outputs,
|
| 152 |
+
)
|
| 153 |
+
if config.temporal_smoothing is not None:
|
| 154 |
+
self.temporal_smoothing = config.temporal_smoothing.build(dim=hidden)
|
| 155 |
+
if not config.linear_baseline:
|
| 156 |
+
if config.time_pos_embedding:
|
| 157 |
+
self.time_pos_embed = nn.Parameter(
|
| 158 |
+
torch.randn(1, config.max_seq_len, hidden)
|
| 159 |
+
)
|
| 160 |
+
if config.subject_embedding:
|
| 161 |
+
self.subject_embed = nn.Embedding(config.n_subjects, hidden)
|
| 162 |
+
self.encoder = config.encoder.build(dim=hidden)
|
| 163 |
+
if config.compile_backbone:
|
| 164 |
+
self.encoder = torch.compile(self.encoder)
|
| 165 |
+
|
| 166 |
+
@property
|
| 167 |
+
def device(self) -> torch.device:
|
| 168 |
+
return next(self.parameters()).device
|
| 169 |
+
|
| 170 |
+
def forward(
|
| 171 |
+
self,
|
| 172 |
+
batch: SegmentData,
|
| 173 |
+
pool_outputs: bool = True,
|
| 174 |
+
return_attn: bool = False,
|
| 175 |
+
) -> torch.Tensor | tuple[torch.Tensor, list[torch.Tensor]]:
|
| 176 |
+
"""Run forward pass.
|
| 177 |
+
|
| 178 |
+
Parameters
|
| 179 |
+
----------
|
| 180 |
+
batch : SegmentData
|
| 181 |
+
Input batch with modality features.
|
| 182 |
+
pool_outputs : bool
|
| 183 |
+
Whether to pool temporal outputs to n_output_timesteps.
|
| 184 |
+
return_attn : bool
|
| 185 |
+
If True, return (predictions, attention_weights) tuple.
|
| 186 |
+
Attention weights are collected via hooks on the encoder's
|
| 187 |
+
attention layers during the forward pass.
|
| 188 |
+
|
| 189 |
+
Returns
|
| 190 |
+
-------
|
| 191 |
+
torch.Tensor or tuple[torch.Tensor, list[torch.Tensor]]
|
| 192 |
+
Predictions of shape (B, n_outputs, T), or a tuple of
|
| 193 |
+
(predictions, attention_maps) when return_attn is True.
|
| 194 |
+
"""
|
| 195 |
+
attn_maps = []
|
| 196 |
+
hooks = []
|
| 197 |
+
if return_attn and hasattr(self, "encoder"):
|
| 198 |
+
for module in self.encoder.modules():
|
| 199 |
+
if "attention" in module.__class__.__name__.lower():
|
| 200 |
+
def _hook(mod, inp, out, store=attn_maps):
|
| 201 |
+
if isinstance(out, tuple) and len(out) >= 2:
|
| 202 |
+
second = out[1]
|
| 203 |
+
if second is not None:
|
| 204 |
+
if hasattr(second, "post_softmax_attn") and second.post_softmax_attn is not None:
|
| 205 |
+
store.append(second.post_softmax_attn.detach())
|
| 206 |
+
elif isinstance(second, torch.Tensor):
|
| 207 |
+
store.append(second.detach())
|
| 208 |
+
elif hasattr(mod, "_attn_weights") and mod._attn_weights is not None:
|
| 209 |
+
store.append(mod._attn_weights.detach())
|
| 210 |
+
hooks.append(module.register_forward_hook(_hook))
|
| 211 |
+
|
| 212 |
+
x = self.aggregate_features(batch) # B, T, H
|
| 213 |
+
subject_id = batch.data.get("subject_id", None)
|
| 214 |
+
if hasattr(self, "temporal_smoothing"):
|
| 215 |
+
x = self.temporal_smoothing(x.transpose(1, 2)).transpose(1, 2)
|
| 216 |
+
if not self.config.linear_baseline:
|
| 217 |
+
x = self.transformer_forward(x, subject_id)
|
| 218 |
+
x = x.transpose(1, 2) # B, H, T
|
| 219 |
+
if self.config.low_rank_head is not None:
|
| 220 |
+
x = self.low_rank_head(x.transpose(1, 2)).transpose(1, 2)
|
| 221 |
+
x = self.predictor(x, subject_id) # B, O, T
|
| 222 |
+
if pool_outputs:
|
| 223 |
+
out = self.pooler(x) # B, O, T'
|
| 224 |
+
else:
|
| 225 |
+
out = x
|
| 226 |
+
|
| 227 |
+
for h in hooks:
|
| 228 |
+
h.remove()
|
| 229 |
+
|
| 230 |
+
if return_attn:
|
| 231 |
+
return out, attn_maps
|
| 232 |
+
return out
|
| 233 |
+
|
| 234 |
+
def aggregate_features(self, batch):
|
| 235 |
+
tensors = []
|
| 236 |
+
# get B, T
|
| 237 |
+
for modality in batch.data.keys():
|
| 238 |
+
if modality in self.feature_dims:
|
| 239 |
+
break
|
| 240 |
+
x = batch.data[modality]
|
| 241 |
+
B, T = x.shape[0], x.shape[-1]
|
| 242 |
+
for modality in self.feature_dims.keys():
|
| 243 |
+
if modality not in self.projectors or modality not in batch.data:
|
| 244 |
+
data = torch.zeros(
|
| 245 |
+
B, T, self.config.hidden // len(self.feature_dims)
|
| 246 |
+
).to(x.device)
|
| 247 |
+
else:
|
| 248 |
+
data = batch.data[modality] # B, L, H, T
|
| 249 |
+
data = data.to(torch.float32)
|
| 250 |
+
if data.ndim == 3:
|
| 251 |
+
data = data.unsqueeze(1)
|
| 252 |
+
# mean over layers
|
| 253 |
+
if self.config.layer_aggregation == "mean":
|
| 254 |
+
data = data.mean(dim=1)
|
| 255 |
+
elif self.config.layer_aggregation == "cat":
|
| 256 |
+
data = rearrange(data, "b l d t -> b (l d) t")
|
| 257 |
+
data = data.transpose(1, 2)
|
| 258 |
+
assert data.ndim == 3 # B, T, D
|
| 259 |
+
if isinstance(self.projectors[modality], SubjectLayersModel):
|
| 260 |
+
data = self.projectors[modality](
|
| 261 |
+
data.transpose(1, 2), batch.data["subject_id"]
|
| 262 |
+
).transpose(1, 2)
|
| 263 |
+
else:
|
| 264 |
+
data = self.projectors[modality](data) # B, T, H
|
| 265 |
+
if self.config.modality_dropout > 0 and self.training:
|
| 266 |
+
mask = torch.rand(data.shape[0]) < self.config.modality_dropout
|
| 267 |
+
data[mask, :] = torch.zeros_like(data[mask, :])
|
| 268 |
+
tensors.append(data)
|
| 269 |
+
if self.config.extractor_aggregation == "stack":
|
| 270 |
+
out = torch.cat(tensors, dim=1)
|
| 271 |
+
elif self.config.extractor_aggregation == "cat":
|
| 272 |
+
out = torch.cat(tensors, dim=-1)
|
| 273 |
+
elif self.config.extractor_aggregation == "sum":
|
| 274 |
+
out = sum(tensors)
|
| 275 |
+
if self.config.temporal_dropout > 0 and self.training:
|
| 276 |
+
for batch_idx in range(out.shape[0]):
|
| 277 |
+
mask = torch.rand(out.shape[1]) < self.config.temporal_dropout
|
| 278 |
+
out[batch_idx, mask, :] = torch.zeros_like(out[batch_idx, mask, :])
|
| 279 |
+
return out
|
| 280 |
+
|
| 281 |
+
def transformer_forward(self, x, subject_id=None):
|
| 282 |
+
x = self.combiner(x)
|
| 283 |
+
if hasattr(self, "time_pos_embed"):
|
| 284 |
+
x = x + self.time_pos_embed[:, : x.size(1)]
|
| 285 |
+
if hasattr(self, "subject_embed"):
|
| 286 |
+
x = x + self.subject_embed(subject_id)
|
| 287 |
+
x = self.encoder(x)
|
| 288 |
+
return x
|
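Not part of the repository files: the `return_attn` path added by CortexLab, shown on placeholder inputs (`model` is a built `FmriEncoderModel`, `batch` a prepared `SegmentData` batch).

import torch

with torch.inference_mode():
    preds, attn_maps = model(batch, return_attn=True)

print(preds.shape)     # (B, n_outputs, n_output_timesteps)
print(len(attn_maps))  # one (B, heads, T, T) tensor per attention layer that exposes weights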
src/cortexlab/core/subject.py
ADDED
|
@@ -0,0 +1,177 @@
| 1 |
+
"""Cross-subject adaptation for the fMRI encoder.
|
| 2 |
+
|
| 3 |
+
Provides :class:`SubjectAdapter` with two strategies for adapting the
|
| 4 |
+
pre-trained model to a new, unseen subject from a small calibration set:
|
| 5 |
+
|
| 6 |
+
* **Ridge regression** fits a new predictor head from calibration fMRI.
|
| 7 |
+
* **Nearest-neighbour** picks the most similar training subject (zero-shot).
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch
|
| 16 |
+
from torch import nn
|
| 17 |
+
from torch.utils.data import DataLoader
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class SubjectAdapter:
|
| 23 |
+
"""Holds new-subject predictor weights ready for injection into a model."""
|
| 24 |
+
|
| 25 |
+
def __init__(self, weights: torch.Tensor, bias: torch.Tensor | None = None):
|
| 26 |
+
self._weights = weights # (1, in_channels, n_outputs)
|
| 27 |
+
self._bias = bias # (1, n_outputs) or None
|
| 28 |
+
|
| 29 |
+
@classmethod
|
| 30 |
+
def from_ridge(
|
| 31 |
+
cls,
|
| 32 |
+
model: nn.Module,
|
| 33 |
+
calibration_loader: DataLoader,
|
| 34 |
+
regularization: float = 1e-3,
|
| 35 |
+
device: str | torch.device = "cpu",
|
| 36 |
+
) -> SubjectAdapter:
|
| 37 |
+
"""Fit a ridge-regression predictor head from calibration data.
|
| 38 |
+
|
| 39 |
+
Runs the frozen backbone on every batch in *calibration_loader*,
|
| 40 |
+
collects hidden states and fMRI targets, then solves the normal
|
| 41 |
+
equations ``W* = (X^T X + lambda I)^{-1} X^T y``.
|
| 42 |
+
|
| 43 |
+
Parameters
|
| 44 |
+
----------
|
| 45 |
+
model : nn.Module
|
| 46 |
+
The :class:`FmriEncoderModel` instance (must be in eval mode).
|
| 47 |
+
calibration_loader : DataLoader
|
| 48 |
+
Provides ``(batch, ...)`` where ``batch.data["fmri"]`` contains
|
| 49 |
+
ground-truth fMRI for the new subject.
|
| 50 |
+
regularization : float
|
| 51 |
+
Ridge penalty (lambda).
|
| 52 |
+
device : str or torch.device
|
| 53 |
+
Device for computation.
|
| 54 |
+
"""
|
| 55 |
+
model.eval()
|
| 56 |
+
all_hidden, all_targets = [], []
|
| 57 |
+
|
| 58 |
+
with torch.inference_mode():
|
| 59 |
+
for batch in calibration_loader:
|
| 60 |
+
batch = batch.to(device)
|
| 61 |
+
x = model.aggregate_features(batch)
|
| 62 |
+
if hasattr(model, "temporal_smoothing"):
|
| 63 |
+
x = model.temporal_smoothing(x.transpose(1, 2)).transpose(1, 2)
|
| 64 |
+
if not model.config.linear_baseline:
|
| 65 |
+
x = model.transformer_forward(x)
|
| 66 |
+
x = x.transpose(1, 2) # B, H, T
|
| 67 |
+
if model.config.low_rank_head is not None:
|
| 68 |
+
x = model.low_rank_head(x.transpose(1, 2)).transpose(1, 2)
|
| 69 |
+
# Pool to match target temporal dimension
|
| 70 |
+
x = model.pooler(x) # B, H, T'
|
| 71 |
+
target = batch.data["fmri"] # B, V, T'
|
| 72 |
+
|
| 73 |
+
# Flatten time: (B*T', H) and (B*T', V)
|
| 74 |
+
B, H, T = x.shape
|
| 75 |
+
x_flat = x.permute(0, 2, 1).reshape(-1, H)
|
| 76 |
+
t_flat = target.permute(0, 2, 1).reshape(-1, target.shape[1])
|
| 77 |
+
all_hidden.append(x_flat.cpu())
|
| 78 |
+
all_targets.append(t_flat.cpu())
|
| 79 |
+
|
| 80 |
+
X = torch.cat(all_hidden, dim=0).float() # (N, H)
|
| 81 |
+
Y = torch.cat(all_targets, dim=0).float() # (N, V)
|
| 82 |
+
|
| 83 |
+
# Ridge regression: W = (X^T X + lambda I)^{-1} X^T Y
|
| 84 |
+
XtX = X.T @ X
|
| 85 |
+
reg = regularization * torch.eye(XtX.shape[0])
|
| 86 |
+
W = torch.linalg.solve(XtX + reg, X.T @ Y) # (H, V)
|
| 87 |
+
weights = W.unsqueeze(0) # (1, H, V)
|
| 88 |
+
|
| 89 |
+
logger.info(
|
| 90 |
+
"Ridge adapter fitted: %d samples, hidden=%d, vertices=%d",
|
| 91 |
+
X.shape[0],
|
| 92 |
+
X.shape[1],
|
| 93 |
+
Y.shape[1],
|
| 94 |
+
)
|
| 95 |
+
return cls(weights=weights)
|
| 96 |
+
|
| 97 |
+
@classmethod
|
| 98 |
+
def from_nearest_neighbor(
|
| 99 |
+
cls,
|
| 100 |
+
model: nn.Module,
|
| 101 |
+
calibration_loader: DataLoader,
|
| 102 |
+
device: str | torch.device = "cpu",
|
| 103 |
+
) -> SubjectAdapter:
|
| 104 |
+
"""Zero-shot adaptation by finding the closest training subject.
|
| 105 |
+
|
| 106 |
+
Computes a mean hidden-state signature for the new subject and
|
| 107 |
+
matches it to each training subject's predictor weight signature
|
| 108 |
+
via cosine similarity.
|
| 109 |
+
|
| 110 |
+
Parameters
|
| 111 |
+
----------
|
| 112 |
+
model : nn.Module
|
| 113 |
+
The :class:`FmriEncoderModel` instance.
|
| 114 |
+
calibration_loader : DataLoader
|
| 115 |
+
Provides calibration batches for the new subject.
|
| 116 |
+
device : str or torch.device
|
| 117 |
+
Device for computation.
|
| 118 |
+
"""
|
| 119 |
+
model.eval()
|
| 120 |
+
all_hidden = []
|
| 121 |
+
|
| 122 |
+
with torch.inference_mode():
|
| 123 |
+
for batch in calibration_loader:
|
| 124 |
+
batch = batch.to(device)
|
| 125 |
+
x = model.aggregate_features(batch)
|
| 126 |
+
if hasattr(model, "temporal_smoothing"):
|
| 127 |
+
x = model.temporal_smoothing(x.transpose(1, 2)).transpose(1, 2)
|
| 128 |
+
if not model.config.linear_baseline:
|
| 129 |
+
x = model.transformer_forward(x)
|
| 130 |
+
all_hidden.append(x.mean(dim=(0, 1)).cpu())
|
| 131 |
+
|
| 132 |
+
new_sig = torch.stack(all_hidden).mean(dim=0) # (H,)
|
| 133 |
+
|
| 134 |
+
# Compare against each training subject's predictor weights
|
| 135 |
+
pred_weights = model.predictor.weights # (n_subjects, in_ch, out_ch)
|
| 136 |
+
n_subjects = pred_weights.shape[0]
|
| 137 |
+
best_sim, best_idx = -1.0, 0
|
| 138 |
+
for i in range(n_subjects):
|
| 139 |
+
w_sig = pred_weights[i].mean(dim=-1).cpu() # (in_ch,)
|
| 140 |
+
# Truncate/pad to match dimensions if needed
|
| 141 |
+
dim = min(w_sig.shape[0], new_sig.shape[0])
|
| 142 |
+
sim = torch.nn.functional.cosine_similarity(
|
| 143 |
+
w_sig[:dim].unsqueeze(0), new_sig[:dim].unsqueeze(0)
|
| 144 |
+
).item()
|
| 145 |
+
if sim > best_sim:
|
| 146 |
+
best_sim = sim
|
| 147 |
+
best_idx = i
|
| 148 |
+
|
| 149 |
+
logger.info(
|
| 150 |
+
"Nearest-neighbour match: subject %d (cosine sim %.4f)", best_idx, best_sim
|
| 151 |
+
)
|
| 152 |
+
weights = pred_weights[best_idx : best_idx + 1].detach().cpu()
|
| 153 |
+
bias = None
|
| 154 |
+
if hasattr(model.predictor, "bias") and model.predictor.bias is not None:
|
| 155 |
+
bias = model.predictor.bias[best_idx : best_idx + 1].detach().cpu()
|
| 156 |
+
return cls(weights=weights, bias=bias)
|
| 157 |
+
|
| 158 |
+
def inject_into_model(self, model: nn.Module) -> int:
|
| 159 |
+
"""Append the adapted weights as a new subject in the predictor.
|
| 160 |
+
|
| 161 |
+
Returns the integer subject ID assigned to the new subject.
|
| 162 |
+
"""
|
| 163 |
+
pred = model.predictor
|
| 164 |
+
old_weights = pred.weights.data # (n_subjects, in_ch, out_ch)
|
| 165 |
+
new_weights = self._weights.to(old_weights.device)
|
| 166 |
+
pred.weights = nn.Parameter(
|
| 167 |
+
torch.cat([old_weights, new_weights], dim=0)
|
| 168 |
+
)
|
| 169 |
+
new_id = old_weights.shape[0]
|
| 170 |
+
|
| 171 |
+
if self._bias is not None and hasattr(pred, "bias") and pred.bias is not None:
|
| 172 |
+
old_bias = pred.bias.data
|
| 173 |
+
new_bias = self._bias.to(old_bias.device)
|
| 174 |
+
pred.bias = nn.Parameter(torch.cat([old_bias, new_bias], dim=0))
|
| 175 |
+
|
| 176 |
+
logger.info("Injected new subject as ID %d", new_id)
|
| 177 |
+
return new_id
|
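Not part of the repository files: a sketch of the adaptation workflow, with `model`, `calibration_loader`, and `test_batch` as placeholders for a loaded encoder, a small DataLoader of calibration runs from the unseen subject, and a held-out batch.

import torch
from cortexlab.core.subject import SubjectAdapter

adapter = SubjectAdapter.from_ridge(model, calibration_loader, regularization=1e-3)
new_id = adapter.inject_into_model(model)

# Route the held-out batch through the newly appended predictor head.
test_batch.data["subject_id"] = torch.full_like(test_batch.data["subject_id"], new_id)
with torch.inference_mode():
    preds = model(test_batch)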
src/cortexlab/data/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
from cortexlab.data.loader import (
    MultiStudyLoader,
    get_hcp_labels,
    get_hcp_roi_indices,
    get_topk_rois,
    summarize_by_roi,
)

__all__ = [
    "MultiStudyLoader",
    "get_hcp_labels",
    "get_hcp_roi_indices",
    "get_topk_rois",
    "summarize_by_roi",
]
src/cortexlab/data/fmri.py
ADDED
|
@@ -0,0 +1,248 @@
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import re
|
| 8 |
+
import typing as tp
|
| 9 |
+
from enum import Enum
|
| 10 |
+
|
| 11 |
+
import neuralset as ns
|
| 12 |
+
import numpy as np
|
| 13 |
+
import pydantic
|
| 14 |
+
from neuralset.extractors.neuro import FSAVERAGE_SIZES
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _FmriTemplateSpaceSpec(tp.NamedTuple):
|
| 18 |
+
id: str
|
| 19 |
+
shape: tp.Tuple[int, int, int] | None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class FmriTemplateSpace(Enum):
|
| 23 |
+
# MNI - TEMPLATEFLOW (partial)
|
| 24 |
+
# We keep only 1mm-resolution variants as res mapping is handled by vol_to_surf
|
| 25 |
+
MNI152LIN_RES_01 = _FmriTemplateSpaceSpec("tpl-MNI152Lin_res-01", (181, 217, 181))
|
| 26 |
+
MNI152NLIN2009A_ASYM_RES_1 = _FmriTemplateSpaceSpec(
|
| 27 |
+
"tpl-MNI152NLin2009aAsym_res-1", (197, 233, 189)
|
| 28 |
+
)
|
| 29 |
+
MNI152NLIN2009A_SYM_RES_1 = _FmriTemplateSpaceSpec(
|
| 30 |
+
"tpl-MNI152NLin2009aSym_res-1", (197, 233, 189)
|
| 31 |
+
)
|
| 32 |
+
MNI152NLIN2009C_ASYM_RES_01 = _FmriTemplateSpaceSpec(
|
| 33 |
+
"tpl-MNI152NLin2009cAsym_res-01", (193, 229, 193)
|
| 34 |
+
)
|
| 35 |
+
MNI152NLIN2009C_SYM_RES_1 = _FmriTemplateSpaceSpec(
|
| 36 |
+
"tpl-MNI152NLin2009cSym_res-1", (193, 229, 193)
|
| 37 |
+
)
|
| 38 |
+
MNI152NLIN6_ASYM_RES_01 = _FmriTemplateSpaceSpec(
|
| 39 |
+
"tpl-MNI152NLin6Asym_res-01", (182, 218, 182)
|
| 40 |
+
)
|
| 41 |
+
MNI152NLIN6_SYM_RES_01 = _FmriTemplateSpaceSpec(
|
| 42 |
+
"tpl-MNI152NLin6Asym_res-01", (193, 229, 193)
|
| 43 |
+
)
|
| 44 |
+
MNI305 = _FmriTemplateSpaceSpec("tpl-MNI305", (172, 220, 156))
|
| 45 |
+
MNICOLIN27 = _FmriTemplateSpaceSpec("tpl-MNIColin27", (181, 217, 181))
|
| 46 |
+
|
| 47 |
+
# FSAVERAGE
|
| 48 |
+
FSAVERAGE = _FmriTemplateSpaceSpec("fsaverage", (163842,))
|
| 49 |
+
FSAVERAGE_6 = _FmriTemplateSpaceSpec("fsaverage6", (40962,))
|
| 50 |
+
FSAVERAGE_5 = _FmriTemplateSpaceSpec("fsaverage5", (10242,))
|
| 51 |
+
FSAVERAGE_4 = _FmriTemplateSpaceSpec("fsaverage4", (2562,))
|
| 52 |
+
FSAVERAGE_3 = _FmriTemplateSpaceSpec("fsaverage3", (642,))
|
| 53 |
+
|
| 54 |
+
# CIFTI
|
| 55 |
+
CIFTI_HCP_FS_LR_32K = _FmriTemplateSpaceSpec("cifti-hcp-fs_LR_32k", (59412,))
|
| 56 |
+
CIFTI_HCP_FS_LR_164K = _FmriTemplateSpaceSpec("cifti-hcp-fs_LR_164k", (170494,))
|
| 57 |
+
|
| 58 |
+
# NATIVE
|
| 59 |
+
T1W = _FmriTemplateSpaceSpec("T1w", None)
|
| 60 |
+
|
| 61 |
+
# OTHER
|
| 62 |
+
MNI_UNKNOWN = _FmriTemplateSpaceSpec("MNI_unknown", None) # unknown MNI space
|
| 63 |
+
UNKNOWN = _FmriTemplateSpaceSpec("unknown", None) # unknown space
|
| 64 |
+
CUSTOM = _FmriTemplateSpaceSpec(
|
| 65 |
+
"custom", None
|
| 66 |
+
) # custom space e.g. provided by study authors
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def is_mni_space(space: FmriTemplateSpace) -> bool:
|
| 70 |
+
"""
|
| 71 |
+
Check if the given template space is an MNI space.
|
| 72 |
+
"""
|
| 73 |
+
return space.name.startswith("MNI")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def load_mni_mesh(
|
| 77 |
+
template: FmriTemplateSpace,
|
| 78 |
+
target_space="fsaverage",
|
| 79 |
+
base_path: str | None = None,
|
| 80 |
+
) -> dict:
|
| 81 |
+
"""
|
| 82 |
+
Load MNI surface meshes for both hemispheres and white / pial surfaces.
|
| 83 |
+
|
| 84 |
+
Parameters
|
| 85 |
+
----------
|
| 86 |
+
template : FmriTemplateSpace
|
| 87 |
+
target_space : str
|
| 88 |
+
base_path : str or None
|
| 89 |
+
Root directory containing FreeSurfer subjects. If ``None``, reads
|
| 90 |
+
from the ``FREESURFER_SUBJECTS_DIR`` environment variable.
|
| 91 |
+
|
| 92 |
+
Returns
|
| 93 |
+
-------
|
| 94 |
+
meshes : dict
|
| 95 |
+
Dictionary with keys like 'pial_left', 'pial_right', 'white_left', 'white_right'
|
| 96 |
+
and values as loaded nilearn surface meshes.
|
| 97 |
+
"""
|
| 98 |
+
import os
|
| 99 |
+
|
| 100 |
+
if not re.match(r"^fsaverage[3-6]?$", target_space):
|
| 101 |
+
raise ValueError(
|
| 102 |
+
f"target_space must be 'fsaverage' or 'fsaverage3/4/5/6', got '{target_space}'"
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
if not is_mni_space(template):
|
| 106 |
+
raise ValueError(
|
| 107 |
+
f"Template {template.value.id} is required to be an MNI space."
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
if base_path is None:
|
| 111 |
+
base_path = os.getenv("FREESURFER_SUBJECTS_DIR")
|
| 112 |
+
if base_path is None:
|
| 113 |
+
raise EnvironmentError(
|
| 114 |
+
"Set the FREESURFER_SUBJECTS_DIR environment variable to the "
|
| 115 |
+
"directory containing FreeSurfer subjects, or pass base_path explicitly."
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
from nilearn.surface import load_surf_mesh
|
| 119 |
+
|
| 120 |
+
mesh_dir = os.path.join(base_path, template.value.id, "surf", "surf_hybrid_mni_gii")
|
| 121 |
+
meshes = {}
|
| 122 |
+
for surf in ["pial", "white"]:
|
| 123 |
+
for hemi in ["left", "right"]:
|
| 124 |
+
mesh_path = os.path.join(mesh_dir, f"{hemi[0]}h.{surf}.{target_space}.gii")
|
| 125 |
+
meshes[f"{surf}_{hemi}"] = load_surf_mesh(mesh_path)
|
| 126 |
+
return meshes
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class TribeSurfaceProjector(ns.extractors.neuro.SurfaceProjector):
|
| 130 |
+
"""Project data to an fsaverage surface mesh.
|
| 131 |
+
For volumetric data, this uses ``nilearn.surface.vol_to_surf`` to project the data to the surface.
|
| 132 |
+
For surface data, this simply downsamples the data to the target mesh resolution.
|
| 133 |
+
|
| 134 |
+
Fields beyond ``mesh`` mirror the keyword arguments of
|
| 135 |
+
``nilearn.surface.vol_to_surf`` and are forwarded to it.
|
| 136 |
+
|
| 137 |
+
Examples
|
| 138 |
+
--------
|
| 139 |
+
>>> SurfaceProjector(mesh="fsaverage5")
|
| 140 |
+
>>> SurfaceProjector(mesh="fsaverage6", radius=5.0, interpolation="nearest")
|
| 141 |
+
"""
|
| 142 |
+
|
| 143 |
+
mesh: str
|
| 144 |
+
radius: float = 3.0
|
| 145 |
+
interpolation: tp.Literal["linear", "nearest"] = "linear"
|
| 146 |
+
kind: tp.Literal["auto", "line", "ball"] = "auto"
|
| 147 |
+
n_samples: int | None = None
|
| 148 |
+
mask_img: tp.Any | None = None
|
| 149 |
+
depth: list[float] | None = None
|
| 150 |
+
center_depth: float = 1
|
| 151 |
+
extract_fsaverage_from_mni: bool = False
|
| 152 |
+
|
| 153 |
+
_mesh: tp.Any | None = pydantic.PrivateAttr(default=None)
|
| 154 |
+
|
| 155 |
+
def model_post_init(self, __context: tp.Any) -> None:
|
| 156 |
+
super().model_post_init(__context)
|
| 157 |
+
assert (
|
| 158 |
+
self.center_depth >= 0 and self.center_depth <= 1
|
| 159 |
+
), "center_depth must be between 0 and 1"
|
| 160 |
+
if self.mesh not in FSAVERAGE_SIZES:
|
| 161 |
+
raise ValueError(f"mesh must be an fsaverage mesh (got {self.mesh!r})")
|
| 162 |
+
|
| 163 |
+
def get_mesh(self) -> tp.Any:
|
| 164 |
+
if self._mesh is None:
|
| 165 |
+
if self.extract_fsaverage_from_mni:
|
| 166 |
+
mni_template_spec = FmriTemplateSpace["MNI152NLIN2009C_ASYM_RES_01"]
|
| 167 |
+
fsaverage = load_mni_mesh(mni_template_spec, self.mesh)
|
| 168 |
+
else:
|
| 169 |
+
from nilearn import datasets
|
| 170 |
+
|
| 171 |
+
fsaverage = datasets.fetch_surf_fsaverage(self.mesh)
|
| 172 |
+
self._mesh = fsaverage
|
| 173 |
+
return self._mesh
|
| 174 |
+
|
| 175 |
+
def get_intermediate_mesh(
|
| 176 |
+
self, hemi: str, center_depth: float = 0.5
|
| 177 |
+
) -> tuple[np.ndarray, np.ndarray]:
|
| 178 |
+
meshes = self.get_mesh()
|
| 179 |
+
surf_mesh, inner_mesh = meshes[f"pial_{hemi}"], meshes[f"white_{hemi}"]
|
| 180 |
+
from nilearn.surface import InMemoryMesh
|
| 181 |
+
|
| 182 |
+
if isinstance(surf_mesh, str):
|
| 183 |
+
import nibabel
|
| 184 |
+
|
| 185 |
+
surf_vertices, surf_faces = nibabel.load(surf_mesh).darrays
|
| 186 |
+
inner_vertices, inner_faces = nibabel.load(inner_mesh).darrays
|
| 187 |
+
surf_vertices, surf_faces = surf_vertices.data, surf_faces.data
|
| 188 |
+
inner_vertices, inner_faces = inner_vertices.data, inner_faces.data
|
| 189 |
+
elif isinstance(surf_mesh, InMemoryMesh):
|
| 190 |
+
surf_vertices, surf_faces = surf_mesh.coordinates, surf_mesh.faces
|
| 191 |
+
inner_vertices, inner_faces = inner_mesh.coordinates, inner_mesh.faces
|
| 192 |
+
else:
|
| 193 |
+
raise TypeError(f"Unsupported mesh type: {type(surf_mesh)}")
|
| 194 |
+
half_vertices = surf_vertices * center_depth + inner_vertices * (
|
| 195 |
+
1 - center_depth
|
| 196 |
+
)
|
| 197 |
+
half_depth_mesh = (half_vertices, surf_faces)
|
| 198 |
+
return half_depth_mesh
|
| 199 |
+
|
| 200 |
+
def apply(self, rec: tp.Any) -> np.ndarray:
|
| 201 |
+
|
| 202 |
+
if len(rec.shape) == 4:
|
| 203 |
+
# 4-D volume data → use nilearn.surface.vol_to_surf
|
| 204 |
+
meshes = self.get_mesh()
|
| 205 |
+
from nilearn.surface import vol_to_surf
|
| 206 |
+
|
| 207 |
+
hemis = []
|
| 208 |
+
for hemi in ("left", "right"):
|
| 209 |
+
if self.center_depth == 1:
|
| 210 |
+
surf_mesh = meshes[f"pial_{hemi}"]
|
| 211 |
+
else:
|
| 212 |
+
surf_mesh = self.get_intermediate_mesh(hemi, self.center_depth)
|
| 213 |
+
hemis.append(
|
| 214 |
+
vol_to_surf(
|
| 215 |
+
rec,
|
| 216 |
+
surf_mesh=surf_mesh,
|
| 217 |
+
inner_mesh=meshes[f"white_{hemi}"],
|
| 218 |
+
radius=self.radius,
|
| 219 |
+
interpolation=self.interpolation,
|
| 220 |
+
kind=self.kind,
|
| 221 |
+
n_samples=self.n_samples,
|
| 222 |
+
mask_img=self.mask_img,
|
| 223 |
+
depth=self.depth,
|
| 224 |
+
)
|
| 225 |
+
)
|
| 226 |
+
return np.vstack(hemis)
|
| 227 |
+
|
| 228 |
+
elif len(rec.shape) == 2:
|
| 229 |
+
# 2-D surface data → downsample to target mesh resolution
|
| 230 |
+
n_vertices = rec.shape[0] // 2
|
| 231 |
+
if n_vertices not in list(FSAVERAGE_SIZES.values()) or rec.shape[0] % 2:
|
| 232 |
+
msg = f"The detected number of vertices ({rec.shape[0]}) is not in {list(FSAVERAGE_SIZES.values())}"
|
| 233 |
+
raise ValueError(msg)
|
| 234 |
+
n_vertices_resampled = FSAVERAGE_SIZES.get(self.mesh)
|
| 235 |
+
data = rec.get_fdata()
|
| 236 |
+
if n_vertices < n_vertices_resampled:
|
| 237 |
+
raise NotImplementedError(
|
| 238 |
+
f"Cannot upsample from {n_vertices} vertices to {n_vertices_resampled} vertices"
|
| 239 |
+
)
|
| 240 |
+
if n_vertices > n_vertices_resampled:
|
| 241 |
+
left = data[:n_vertices_resampled, :]
|
| 242 |
+
right = data[n_vertices : n_vertices + n_vertices_resampled, :]
|
| 243 |
+
data = np.concatenate([left, right], axis=0)
|
| 244 |
+
return data
|
| 245 |
+
else:
|
| 246 |
+
raise ValueError(
|
| 247 |
+
f"Unexpected shape {rec.shape} (should have 2 or 4 dimensions)"
|
| 248 |
+
)
|
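A hedged sketch of the projection workflow implemented above: a 4-D BOLD volume is mapped onto fsaverage5 vertices (left hemisphere stacked on top of right). The file name is a placeholder, not part of the dataset layout in this diff:

```python
import nibabel

from cortexlab.data.fmri import TribeSurfaceProjector

projector = TribeSurfaceProjector(mesh="fsaverage5", radius=3.0, kind="ball")
bold = nibabel.load("sub-01_task-movie_bold.nii.gz")  # any 4-D NIfTI image (placeholder path)
vertices = projector.apply(bold)                      # array of shape (2 * 10242, n_volumes)
```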
src/cortexlab/data/loader.py
ADDED
@@ -0,0 +1,318 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import typing as tp
from collections import Counter, OrderedDict, defaultdict
from functools import lru_cache
from pathlib import Path

import exca
import mne
import neuralset as ns
import numpy as np
import pandas as pd
from neuralset.events.study import Chain, Study
from neuralset.events.transforms import EventsBuilder, EventsTransform
from neuralset.extractors.neuro import FSAVERAGE_SIZES

from cortexlab.data.transforms import RemoveDuplicates

FMRI_SPACES = {
    "Algonauts2025Bold": "MNI152NLIN2009C_ASYM_RES_01",
    "Wen2017": "MNI152NLIN6_ASYM_RES_01",
    "Lahner2024Bold": "MNI152NLIN2009C_ASYM_RES_01",
    "Lebel2023Bold": "MNI152NLIN2009C_ASYM_RES_01",
    "Vanessen2023": "MNI152NLIN6_ASYM_RES_01",
    "Aliko2020": "MNICOLIN27",
    "Li2022": "MNICOLIN27",
    "Nastase2020": "MNI152NLIN2009C_ASYM_RES_01",
}
RECORDING_DURATIONS = {
    "Algonauts2025Bold/sub-01": 66.4,
    "Algonauts2025Bold/sub-02": 66.4,
    "Algonauts2025Bold/sub-03": 66.4,
    "Algonauts2025Bold/sub-04": 0,
    "Algonauts2025Bold/sub-05": 66.4,
    "Algonauts2025Bold/sub-06": 0,
    "Lahner2024Bold/1": 6.2,
    "Lahner2024Bold/10": 6.2,
    "Lahner2024Bold/2": 6.2,
    "Lahner2024Bold/3": 6.2,
    "Lahner2024Bold/4": 6.2,
    "Lahner2024Bold/5": 6.2,
    "Lahner2024Bold/6": 6.2,
    "Lahner2024Bold/7": 6.2,
    "Lahner2024Bold/8": 6.2,
    "Lahner2024Bold/9": 6.2,
    "Lebel2023Bold/UTS01": 17.9,
    "Lebel2023Bold/UTS02": 18.1,
    "Lebel2023Bold/UTS03": 18.1,
    "Lebel2023Bold/UTS04": 6.2,
    "Lebel2023Bold/UTS05": 6.4,
    "Lebel2023Bold/UTS06": 6.4,
    "Lebel2023Bold/UTS07": 6.4,
    "Lebel2023Bold/UTS08": 6.4,
    "Wen2017/subject1": 11.7,
    "Wen2017/subject2": 11.7,
    "Wen2017/subject3": 11.7,
}


class MultiStudyLoader(EventsBuilder):
    """Config for loading multiple studies.
    Note that the query and enhancers are shared across all studies.
    For example, setting timeline_index == 0 will select the first timeline of each study.
    """

    names: str | list[str]
    path: str | Path
    transforms: list[EventsTransform] | OrderedDict[str, EventsTransform] | None = None
    query: str | None = None
    studies_to_include: list[str] | None = None
    infra_timelines: exca.MapInfra = exca.MapInfra(cluster="processpool", max_jobs=None)

    def model_post_init(self, log__: tp.Any) -> None:
        super().model_post_init(log__)
        if self.studies_to_include is not None:
            for name in self.studies_to_include:
                if name not in self.names:
                    raise ValueError(f"Study {name} not found in {self.names}")
        self.get_studies()  # run this so that studies are registered (in case _run is cached)

    @infra_timelines.apply(item_uid=str)
    def dummy(self, items: tp.Iterable[str]) -> tp.Iterator[None]:
        for item in items:
            yield None

    def get_studies(self) -> dict[str, Chain]:
        studies = {}
        if isinstance(self.names, str):
            names = [self.names]
        else:
            names = self.names
        for name in names:
            studies[name] = Study(
                name=name,
                path=self.path,
                query=self.query,
                infra_timelines=self.infra_timelines,
            )
        return studies

    def study_summary(self, apply_query: bool = True) -> pd.DataFrame:
        summaries = []
        for name, study in self.get_studies().items():
            if (
                apply_query
                and self.studies_to_include is not None
                and name not in self.studies_to_include
            ):
                continue
            summary = study.study_summary(apply_query=apply_query)
            summary.loc[:, "study"] = name
            summaries.append(summary)
        return pd.concat(summaries, ignore_index=True)

    def _run(self) -> pd.DataFrame:
        dfs = []
        for name, study in self.get_studies().items():
            if (
                self.studies_to_include is not None
                and name not in self.studies_to_include
            ):
                continue
            chain = Chain(steps={"study": study, **OrderedDict(self.transforms)})
            df = chain.run()
            df.loc[:, "study"] = name
            dfs.append(df)
        out = pd.concat(dfs, ignore_index=True)
        return out


def split_segments_by_time(
    segments: list[ns.segments.Segment], val_ratio: float, split: str
) -> list[ns.segments.Segment]:
    timeline_segments = defaultdict(list)
    return_segments = []
    for segment in segments:
        if len(segment.ns_events) == 0:
            continue
        timeline = segment.ns_events[0].timeline
        timeline_segments[timeline].append(segment)
    for timeline, segments in timeline_segments.items():
        start = min(segment.start for segment in segments)
        stop = max(segment.stop for segment in segments)
        split_time = start + (stop - start) * val_ratio
        for segment in segments:
            if split == "val" and segment.start < split_time:
                return_segments.append(segment)
            elif split == "train" and segment.start >= split_time:
                return_segments.append(segment)
    return return_segments


def assign_fmri_space(events: pd.DataFrame, space: str | None = None) -> pd.DataFrame:
    assert events.study.nunique() == 1, "Only one study can be assigned at a time"
    study_name = events.study.unique()[0]
    if study_name not in FMRI_SPACES:
        raise ValueError(f"Study {study_name} not found in FMRI_SPACES")
    default_space = FMRI_SPACES[study_name]
    assigned_space = space or default_space
    events.loc[events.type == "Fmri", "space"] = assigned_space
    return events


def set_study_in_average_subject_mode(
    study: EventsBuilder, trigger_type: str, trigger_field: str = "filepath"
) -> EventsBuilder:
    study.transforms["alignevents"] = ns.events.transforms.AlignEvents(
        trigger_type=trigger_type, trigger_field=trigger_field, types_to_align="Event"
    )
    study.transforms["removeduplicates"] = RemoveDuplicates(
        subset=["start", "stop", "filepath", "type"]
    )
    for key in ["chunksounds", "chunkvideos"]:
        study.transforms.move_to_end(key)
    return study


def get_subject_weights(
    subject_id_mapping: dict[str, int],
    weigh_by: tp.Literal[
        "n_subjects", "speech", "video", "recording_time"
    ] = "n_subjects",
) -> list[float]:
    subject_weights = []
    if weigh_by in ["speech", "video"]:
        for subject in subject_id_mapping:
            if weigh_by == "speech":
                weight = int(subject.startswith("Lebel"))
            elif weigh_by == "video":
                weight = int(subject.startswith("Algonauts"))
            subject_weights.append(float(weight))
    elif weigh_by == "recording_time":
        for subject in subject_id_mapping:
            if subject not in RECORDING_DURATIONS:
                raise ValueError(f"Subject {subject} not found in RECORDING_DURATIONS")
            subject_weights.append(float(RECORDING_DURATIONS[subject]))
    elif weigh_by == "n_subjects":
        num_subjects_per_study = Counter(
            [k.split("/")[0] for k in subject_id_mapping.keys()]
        )
        for subject in subject_id_mapping:
            weight = 1 / num_subjects_per_study[subject.split("/")[0]]
            subject_weights.append(float(weight))
    else:
        raise ValueError(f"Invalid weight type: {weigh_by}")
    return subject_weights


@lru_cache
def get_hcp_labels(mesh="fsaverage5", combine=False, hemi="both"):
    """
    Get the HCP labels for the fsaverage subject.
    """
    if hemi in ["right", "left"]:
        subjects_dir = Path(mne.datasets.sample.data_path()) / "subjects"
        mne.datasets.fetch_hcp_mmp_parcellation(
            subjects_dir=subjects_dir, accept=True, verbose=True, combine=combine
        )
        name = "HCPMMP1_combined" if combine else "HCPMMP1"
        with ns.utils.ignore_all():
            labels = mne.read_labels_from_annot(
                "fsaverage", name, hemi="both", subjects_dir=subjects_dir
            )
        label_to_vertices = {}
        for label in labels:
            name, vertices = label.name, np.array(label.vertices)
            if not combine:
                name = name[2:]
            name = name.replace("_ROI", "")  # .replace(" Cortex", "")
            if (hemi == "right" and "-lh" in name) or (
                hemi == "left" and "-rh" in name
            ):
                continue
            name = name.replace("-rh", "").replace("-lh", "")
            label_to_vertices[name] = np.array(vertices)
        assert sum(len(v) for v in label_to_vertices.values()) == 163842
        expected_size = FSAVERAGE_SIZES[mesh]
        index_offset = expected_size if hemi == "right" else 0
        label_to_vertices = {
            k: v[v < expected_size] + index_offset for k, v in label_to_vertices.items()
        }
        assert sum(len(v) for v in label_to_vertices.values()) == expected_size
        return label_to_vertices
    else:
        assert hemi == "both", f"Invalid hemisphere: {hemi}"
        left, right = get_hcp_labels(
            mesh=mesh, combine=combine, hemi="left"
        ), get_hcp_labels(mesh=mesh, combine=combine, hemi="right")
        label_to_vertices = {
            k: np.concatenate([left[k], right[k]]) for k in left.keys()
        }
        return label_to_vertices


def get_hcp_vertex_labels(mesh="fsaverage5", combine=False):
    labels = get_hcp_labels(mesh, combine)
    out = [""] * FSAVERAGE_SIZES[mesh] * 2
    for label, vertices in labels.items():
        for vertex in vertices:
            out[int(vertex)] = label
    return out


def get_hcp_roi_indices(rois: str | list[str], hemi="both", mesh="fsaverage5"):
    labels = get_hcp_labels(mesh=mesh, combine=False, hemi=hemi)
    if isinstance(rois, str):
        rois = [rois]
    selected_labels = []
    for roi in rois:
        if roi[-1] == "*":
            sel = [label for label in labels.keys() if label.startswith(roi[:-1])]
        elif roi[0] == "*":
            sel = [label for label in labels.keys() if label.endswith(roi[1:])]
        else:
            sel = [label for label in labels.keys() if label == roi]
        if not sel:
            raise ValueError(f"ROI {roi} not found in HCP labels")
        selected_labels.extend(sel)
    vertex_indices = np.concatenate([labels[label] for label in selected_labels])
    return vertex_indices


def summarize_by_roi(data: np.ndarray, hemi="both", mesh="fsaverage5"):
    assert data.ndim == 1, "Data must be 1D"
    if hemi in ["left", "right", "both"]:
        labels = get_hcp_labels(mesh=mesh, combine=False, hemi=hemi)
        out = np.array(
            [
                data[get_hcp_roi_indices(roi, hemi=hemi, mesh=mesh)].mean()
                for roi in labels.keys()
            ]
        )
    elif hemi == "both_separate":
        out = np.concatenate(
            [
                summarize_by_roi(data, hemi="left", mesh=mesh),
                summarize_by_roi(data, hemi="right", mesh=mesh),
            ]
        )
    else:
        raise ValueError(f"Invalid hemisphere: {hemi}")
    return out


def get_topk_rois(data: np.ndarray, hemi="both", mesh="fsaverage5", k=10) -> list[str]:
    values = summarize_by_roi(data, hemi=hemi, mesh=mesh)
    if hemi == "both_separate":
        left_labels = get_hcp_labels(mesh=mesh, combine=False, hemi="left").keys()
        right_labels = get_hcp_labels(mesh=mesh, combine=False, hemi="right").keys()
        labels = [f"{l}-lh" for l in left_labels] + [f"{l}-rh" for l in right_labels]
    else:
        labels = get_hcp_labels(mesh=mesh, combine=False, hemi=hemi).keys()
    top_k = np.argsort(values)[::-1][:k]
    return np.array(list(labels))[top_k]
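A hedged configuration sketch for `MultiStudyLoader` as declared above; the path, query string, and empty transform dict are placeholders, and `_run()` is called directly here only because it is the method defined in this file (the `EventsBuilder` base class may expose a public wrapper):

```python
from collections import OrderedDict

from cortexlab.data.loader import MultiStudyLoader

loader = MultiStudyLoader(
    names=["Lebel2023Bold", "Algonauts2025Bold"],
    path="/data/studies",              # placeholder root folder with downloaded studies
    query=None,                        # optional query shared across all studies
    transforms=OrderedDict(),          # optional EventsTransform steps
)
events = loader._run()                 # concatenated events DataFrame with a "study" column
print(events.groupby("study").size())
```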
src/cortexlab/data/studies/__init__.py
ADDED
@@ -0,0 +1,10 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .algonauts2025 import Algonauts2025, Algonauts2025Bold
from .lahner2024bold import Lahner2024Bold
from .lebel2023bold import Lebel2023Bold
from .wen2017 import Wen2017
src/cortexlab/data/studies/algonauts2025.py
ADDED
@@ -0,0 +1,315 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Algonauts Project 2025 Challenge: fMRI responses to multimodal movie stimuli.

This study is part of the Algonauts Project 2025 Challenge, using a subset of the
Courtois NeuroMod dataset (https://www.cneuromod.ca/). Participants watched naturalistic
video stimuli including episodes from the TV sitcom "Friends" and feature films while
undergoing fMRI scanning.

Experimental Design:
- 4 participants (sub-01, sub-02, sub-03, sub-05)
- Two stimulus types:
  * "Friends" sitcom: 7 seasons, ~175 episodes, segmented into ~5min chunks (a,b,c,d)
  * "movie10": 4 feature films (Bourne, Wolf, Life, Figures) in ~5min chunks
- TR = 1.49 seconds
- Training data: Friends seasons 1-6, all movies
- Test data: Friends season 7
- Some movies shown twice (Life, Figures) for reliability analysis

Data Format:
- Preprocessed fMRI in MNI152NLin2009cAsym space
- Parcellated using Schaefer-1000 atlas (1000 parcels, 7 networks)
- HDF5 format
- Video stimuli provided as .mkv files
- Word-level transcripts with timestamps (.tsv format)
- Includes rich multimodal annotations (speech, text, visual extractors)

Download Requirements:
- Datalad must be installed (pip install datalad)
- Git must be configured
- Dataset cloned from: https://github.com/courtois-neuromod/algonauts_2025.competitors.git
- Moderate dataset size (~several GB)

Note:
    This dataset is designed for the Algonauts 2025 Challenge focused on predicting
    brain responses to complex, naturalistic multimodal stimuli.
    See: https://algonautsproject.com/2025/index.html
"""

import ast
import logging
import typing as tp
from itertools import product
from pathlib import Path

import numpy as np
import pandas as pd
from neuralset.events import study

logger = logging.getLogger(__name__)


class Algonauts2025(study.Study):
    _SUBJECTS: tp.ClassVar[list[str]] = ["sub-01", "sub-02", "sub-03", "sub-05"]
    _TASKS: tp.ClassVar[list[str]] = ["friends", "movie10"]
    _SPACE: tp.ClassVar[str] = "space-MNI152NLin2009cAsym"
    _ATLAS: tp.ClassVar[str] = "atlas-Schaefer18_parcel-1000Par7Net"
    _FREQUENCY: tp.ClassVar[float] = 1 / 1.49

    device: tp.ClassVar[str] = "Fmri"
    dataset_name: tp.ClassVar[str] = "Algonauts 2025 Challenge"
    url: tp.ClassVar[str] = "https://algonautsproject.com/"
    bibtex: tp.ClassVar[
        str
    ] = """
    @article{algonauts2025,
    url = {https://arxiv.org/abs/2501.00504},
    author = {Gifford, Alessandro T. and Bersch, Domenic and St-Laurent, Marie and Pinsard, Basile and Boyle, Julie and Bellec, Lune and Oliva, Aude and Roig, Gemma and Cichy, Radoslaw M.},
    keywords = {Neurons and Cognition (q-bio.NC), FOS: Biological sciences, FOS: Biological sciences},
    title = {The Algonauts Project 2025 Challenge: How the Human Brain Makes Sense of Multimodal Movies},
    publisher = {arXiv},
    year = {2025},
    copyright = {Creative Commons Attribution 4.0 International},
    doi={https://doi.org/10.48550/arXiv.2501.00504},
    url={https://arxiv.org/abs/2501.00504}
    }
    """
    description: tp.ClassVar[str] = (
        'Subset of Courtois NeuroMod dataset (boyle2020) with fMRI recordings of subjects watching videos of a popular sitcom ("Friends") for Algonauts 2025'
    )
    requirements: tp.ClassVar[tuple[str, ...]] = (
        "datalad>=0.19.5",
        "moviepy",
    )

    _info: tp.ClassVar[study.StudyInfo] = study.StudyInfo(
        num_timelines=1588,
        num_subjects=4,
        num_events_in_query=1700,
        event_types_in_query={"Fmri", "Video", "Word", "Text"},
        data_shape=(1000, 592),
        frequency=0.671,
        fmri_spaces=("custom",),
    )

    def _download(self) -> None:
        raise NotImplementedError("Download method not implemented yet")

    def iter_timelines(self) -> tp.Iterator[dict[str, tp.Any]]:
        for subject in self._SUBJECTS:
            for task in self._TASKS:
                if task == "friends":
                    season_episode_chunk = range(1, 8), range(1, 26), "abcd"
                    for season, episode, chunk in product(*season_episode_chunk):
                        tl = dict(
                            subject=subject,
                            task=task,
                            movie=f"s{season:02d}",
                            chunk=f"e{episode:02d}{chunk}",
                            run=0,
                        )
                        stim_path = self._get_transcript_filepath(tl)
                        if (
                            (season == 5 and episode == 20 and chunk == "a")
                            or (season == 4 and episode == 1 and chunk == "a")
                            or (season == 6 and episode == 3 and chunk == "a")
                            or (season == 4 and episode == 13 and chunk == "b")
                            or (season == 4 and episode == 1 and chunk == "b")
                        ):
                            continue
                        if stim_path.exists():
                            yield tl
                elif task == "movie10":
                    movie_chunk_run = (
                        ["bourne", "wolf", "life", "figures"],
                        range(1, 18),
                        [1, 2],
                    )
                    for movie, chunk, run in product(*movie_chunk_run):  # type: ignore
                        if movie in ["bourne", "wolf"] and run == 2:
                            continue
                        tl = dict(
                            subject=subject,
                            task=task,
                            movie=movie,
                            chunk=str(chunk),
                            run=run,
                        )
                        stim_path = self._get_transcript_filepath(tl)
                        if stim_path.exists():
                            yield tl

    def _get_transcript_filepath(self, timeline: dict[str, tp.Any]) -> Path:
        tl = timeline
        base = (
            self.path
            / "download/algonauts_2025.competitors/stimuli/transcripts"
            / tl["task"]
        )
        if tl["task"] == "friends":
            return base / f"s{tl['movie'][-1]}/friends_{tl['movie']}{tl['chunk']}.tsv"
        elif tl["task"] == "movie10":
            return (
                base / f"{tl['movie']}/movie10_{tl['movie']}{int(tl['chunk']):02d}.tsv"
            )
        raise ValueError(f"Unknown task: {tl['task']}")

    def _get_movie_filepath(self, timeline: dict[str, tp.Any]) -> Path:
        tl = timeline
        base = (
            self.path
            / "download/algonauts_2025.competitors/stimuli/movies"
            / tl["task"]
        )
        if tl["task"] == "friends":
            return base / f"s{tl['movie'][-1]}/friends_{tl['movie']}{tl['chunk']}.mkv"
        elif tl["task"] == "movie10":
            return base / f"{tl['movie']}/{tl['movie']}{int(tl['chunk']):02d}.mkv"
        raise ValueError(f"Unknown task: {tl['task']}")

    def _get_fmri_filepath(self, timeline: dict[str, tp.Any]) -> Path:
        tl = timeline
        subj_dir = (
            self.path
            / "download/algonauts_2025.competitors/fmri"
            / tl["subject"]
            / "func"
        )
        stem = f"{tl['subject']}_task-{tl['task']}_{self._SPACE}_{self._ATLAS}"
        suffix = "_desc-s123456_bold.h5" if tl["task"] == "friends" else "_bold.h5"
        return subj_dir / f"{stem}{suffix}"

    def _load_fmri(self, timeline: dict[str, tp.Any]) -> tp.Any:
        import h5py

        tl = timeline
        fmri_file = self._get_fmri_filepath(timeline)
        fmri = h5py.File(fmri_file, "r")
        if tl["task"] == "friends":
            key = f"{tl['movie'][1:]}{tl['chunk']}"
        else:
            key = f"{tl['movie']}{int(tl['chunk']):02d}"
            if tl["movie"] in ["life", "figures"]:
                key += f"_run-{tl['run']}"
        selected_key = [key_ for key_ in fmri.keys() if key in key_]
        if len(selected_key) != 1:
            logger.error(
                "key=%s, selected=%s, available=%s",
                key,
                selected_key,
                list(fmri.keys()),
            )
            raise ValueError(f"Multiple or no keys found, {key}, {list(fmri.keys())}")
        fmri = fmri[selected_key[0]]
        data = fmri[:].astype(np.float32)
        import nibabel

        obj = nibabel.Nifti2Image(data.T, affine=np.eye(4))
        return obj

    def _get_split(self, timeline: dict[str, tp.Any]) -> str:
        tl = timeline
        if tl["task"] == "friends":
            if int(tl["movie"][-1]) in range(1, 7):
                return "train"
            elif int(tl["movie"][-1]) == 7:
                return "test"
        return "train"

    def _get_fmri_event(self, timeline: dict[str, tp.Any]) -> dict[str, tp.Any]:
        """Return fmri event dict"""
        info = study.SpecialLoader(method=self._load_fmri, timeline=timeline).to_json()
        return dict(type="Fmri", filepath=info, start=0, frequency=self._FREQUENCY)

    def _load_timeline_events(self, timeline: dict[str, tp.Any]) -> pd.DataFrame:
        all_events = []
        if (timeline["task"], timeline["movie"]) != ("friends", "s07"):
            all_events.append(self._get_fmri_event(timeline))

        movie_filepath = self._get_movie_filepath(timeline)
        movie_event = dict(type="Video", filepath=str(movie_filepath), start=0)
        all_events.append(movie_event)

        transcript_path = self._get_transcript_filepath(timeline)
        transcript_df = pd.read_csv(transcript_path, sep="\t")
        word_events = []
        for _, row in transcript_df.iterrows():
            words = ast.literal_eval(row["words_per_tr"])
            starts = ast.literal_eval(row["onsets_per_tr"])
            durations = ast.literal_eval(row["durations_per_tr"])
            for word, start, duration in zip(words, starts, durations):
                event = dict(
                    type="Word",
                    text=word,
                    start=start,
                    duration=duration,
                    stop=start + duration,
                    language="english",
                )
                word_events.append(event)
        if word_events:
            word_df = pd.DataFrame(word_events)
            text = " ".join(word_df["text"].tolist())
            text_event = dict(
                type="Text",
                text=text,
                start=word_df["start"].min(),
                duration=word_df["stop"].max() - word_df["start"].min(),
                stop=word_df["stop"].max(),
                language="english",
            )
            all_events.append(text_event)
            all_events.extend(word_events)

        events_df = pd.DataFrame(all_events)
        events_df["split"] = self._get_split(timeline)

        events_df.loc[events_df.type.isin(["Word", "Sentence", "Text"]), "modality"] = (
            "heard"
        )

        return events_df


class Algonauts2025Bold(Algonauts2025):

    _info: tp.ClassVar[study.StudyInfo] = study.StudyInfo(
        num_timelines=1588,
        num_subjects=4,
        num_events_in_query=1700,
        event_types_in_query={"Fmri", "Video", "Word", "Text"},
        data_shape=(76, 90, 71, 592),
        frequency=0.671,
        fmri_spaces=("T1w", "MNI152NLin2009cAsym"),
    )

    def _download(self) -> None:
        raise NotImplementedError("Download method not implemented yet")

    def _get_fmri_event(self, timeline: dict[str, tp.Any]) -> dict[str, tp.Any]:
        """Return fmri event dict using fmriprep finder"""
        tl = timeline
        if tl["task"] == "friends":
            task_str = f"{tl['movie']}{tl['chunk']}"
        else:
            task_str = f"{tl['movie']}{int(tl['chunk']):02d}"
        subj_dir = self.path / "download" / f"{tl['task']}.fmriprep" / tl["subject"]
        task_pattern = f"*_task-{task_str}_*"
        for session_dir in sorted(subj_dir.iterdir()):
            if not session_dir.name.startswith("ses-"):
                continue
            func_dir = session_dir / "func"
            if func_dir.exists() and list(func_dir.glob(task_pattern + ".nii.gz")):
                fp = func_dir / task_pattern
                return dict(
                    type="Fmri",
                    filepath=fp,
                    layout="fmriprep",
                    start=0,
                    frequency=self._FREQUENCY,
                )
        raise FileNotFoundError(f"No fMRI file found for {tl}")
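A hedged sketch of how the class above might be used once the dataset has been cloned into the expected layout; the path is a placeholder, and `_load_timeline_events` is called directly only because it is the loader defined in this file:

```python
from cortexlab.data.studies import Algonauts2025

study = Algonauts2025(path="/data/studies/Algonauts2025")   # placeholder dataset root
first = next(study.iter_timelines())                        # e.g. {'subject': 'sub-01', 'task': 'friends', ...}
events = study._load_timeline_events(first)                 # DataFrame with Fmri/Video/Word/Text rows
print(events.type.value_counts())
```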
src/cortexlab/data/studies/lahner2024bold.py
ADDED
@@ -0,0 +1,293 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""BOLD Moments: 3T fMRI responses to short naturalistic videos.

This study provides 3T BOLD fMRI data from 10 participants viewing brief (3-second)
naturalistic video clips. The dataset is designed to study neural responses to
dynamic visual events and includes rich metadata and annotations. The test set's high
repetition count (10 reps) enables reliability analysis and within-subject
generalization studies.

Experimental Design:
- 3T fMRI recordings (TR = 1.75 seconds)
- 10 participants
- 4 functional scanning sessions per subject (sessions 2-5)
- Two sets of stimuli:
  * Training set: 1,000 unique 3-second video clips (10 runs)
  * Test set: 102 unique 3-second video clips (3 runs, 10 repetitions each)
- Paradigm: passive viewing of naturalistic video clips
- Oddball trials included for attention monitoring (excluded from analysis)

Data Format:
- BIDS-compliant dataset structure
- fMRIPrep preprocessed data (version B recommended by authors)
- Available in multiple spaces:
  * MNI152NLin2009cAsym (volumetric)
  * T1w (subject-native volumetric)
  * fsaverage (cortical surface, 163842 vertices per hemisphere)
  * fsnative (subject-specific cortical surface)
- Pre-computed GLM betas available for fsaverage space
- Video stimuli
- Event annotations:
  * LLM-generated captions for middle frames of each video

Download Requirements:
- openneuro-py for fMRI data download
- Stimuli downloaded from boldmomentsdataset.csail.mit.edu
- Moderate dataset size (~several GB)
- moviepy required for video processing
"""

import json
import pickle as pkl
import typing as tp
from pathlib import Path

import nibabel
import numpy as np
import pandas as pd
from neuralset.events import study
from neuralset.utils import get_bids_filepath, get_masked_bold_image, read_bids_events


class Lahner2024Bold(study.Study):
    device: tp.ClassVar[str] = "Fmri"
    dataset_name: tp.ClassVar[str] = "BOLD Moments"
    bibtex: tp.ClassVar[
        str
    ] = """
    @article{Lahner2024,
    title = {Modeling short visual events through the BOLD moments video fMRI dataset and metadata},
    volume = {15},
    ISSN = {2041-1723},
    url = {http://dx.doi.org/10.1038/s41467-024-50310-3},
    DOI = {10.1038/s41467-024-50310-3},
    number = {1},
    journal = {Nature Communications},
    publisher = {Springer Science and Business Media LLC},
    author = {Lahner, Benjamin and Dwivedi, Kshitij and Iamshchinina, Polina and Graumann, Monika and Lascelles, Alex and Roig, Gemma and Gifford, Alessandro Thomas and Pan, Bowen and Jin, SouYoung and Ratan Murty, N. Apurva and Kay, Kendrick and Oliva, Aude and Cichy, Radoslaw},
    year = {2024},
    month = jul
    }
    """
    licence: tp.ClassVar[str] = "CC0"
    description: tp.ClassVar[str] = (
        "BOLD Moments: 3T fMRI from 10 participants viewing 1,000+ brief "
        "(3-second) naturalistic videos"
    )

    requirements: tp.ClassVar[tuple[str, ...]] = ("moviepy==2.0.0.dev2",)

    _info: tp.ClassVar[study.StudyInfo] = study.StudyInfo(
        num_timelines=520,
        num_subjects=10,
        num_events_in_query=76,
        event_types_in_query={"Fmri", "Video"},
        data_shape=(62, 77, 61, 238),
        frequency=0.571,
        fmri_spaces=("custom",),
    )

    NUM_SUBJECTS: tp.ClassVar[int] = 10
    NUM_RUNS_PER_SPLIT: tp.ClassVar[dict[str, int]] = {"train": 10, "test": 3}

    DERIVATIVES_FOLDER: tp.ClassVar[str] = "download/derivatives/versionB/fmriprep"
    SPACES: tp.ClassVar[tuple[str, ...]] = (
        "MNI152NLin2009cAsym",
        "T1w",
        "fsaverage",
        "fsnative",
    )

    N_TRIALS_TRAIN: tp.ClassVar[int] = 1000
    N_TRIALS_TEST: tp.ClassVar[int] = 102
    N_VOLUMES_TRAIN: tp.ClassVar[int] = 238
    N_VOLUMES_TEST: tp.ClassVar[int] = 268
    TR_FMRI_S: tp.ClassVar[float] = 1.75

    def _download(self) -> None:
        raise NotImplementedError("Download method not implemented yet")

    def _validate_downloaded_data(self) -> None:
        postfixs = [
            "_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz",
            "_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz",
            "_hemi-R_space-fsaverage_bold.func.gii",
            "_hemi-L_space-fsaverage_bold.func.gii",
        ]

        for tl in self.iter_timelines():
            subj, ses, split, run = tl["subject"], tl["session"], tl["split"], tl["run"]
            for postfix in postfixs:
                fp = self.path / (
                    f"sub-{subj:02d}/ses-{ses:02d}/func/sub-{subj:02d}"
                    f"_ses-{ses:02d}_task-{split}_run-{run:01d}{postfix}"
                )
                if not fp.exists():
                    msg = f"{fp} is missing. Please download again"
                    raise RuntimeError(msg)

        for subj in range(1, self.NUM_SUBJECTS + 1):
            betas_root = (
                self.path / "download/derivatives/versionB/fsaverage/GLM/"
                f"sub-{subj:02}/prepared_betas/"
            )
            for split in ("train", "test"):
                for hemi in ("left", "right"):
                    fp = (
                        betas_root / f"sub-{subj:02}_organized_betas_task-{split}"
                        f"_hemi-{hemi}_normalized.pkl"
                    )
                    if not fp.exists():
                        msg = f"{fp} is missing. Please download again"
                        raise RuntimeError(msg)
                    with fp.open("rb") as f:
                        prepared_betas = pkl.load(f)
                    betas = prepared_betas[0]
                    n_trials = (
                        self.N_TRIALS_TEST
                        if split == "test"
                        else self.N_TRIALS_TRAIN
                    )
                    n_reps = 10 if split == "test" else 3
                    betas_shape = (n_trials, n_reps, 163842)
                    if betas.shape != betas_shape:
                        msg = f"Expected {betas_shape}, got {betas.shape}"
                        raise RuntimeError(msg)
                    stims = prepared_betas[1]
                    if len(stims) != n_trials:
                        msg = f"Expected {n_trials} stimuli, got {len(stims)}"
                        raise RuntimeError(msg)

        root = self.path / "stimuli/stimulus_set/stimuli/"
        for split in ("train", "test"):
            num_expected = (
                self.N_TRIALS_TRAIN if split == "train" else self.N_TRIALS_TEST
            )
            num_found = len(list((root / split).iterdir()))
            if num_found != num_expected:
                msg = f"Expecting {num_expected} stimuli for split {split}"
                msg += f" but found {num_found}. Please download again"
                raise RuntimeError(msg)

    def iter_timelines(self) -> tp.Iterator[dict[str, tp.Any]]:
        for subj in range(1, self.NUM_SUBJECTS + 1):
            for ses in (2, 3, 4, 5):
                for split, n_runs in self.NUM_RUNS_PER_SPLIT.items():
                    for run in range(1, n_runs + 1):
                        yield dict(subject=subj, session=ses, split=split, run=run)

    def _load_timeline_events(self, timeline: dict[str, tp.Any]) -> pd.DataFrame:
        tl = dict(timeline)
        split = tl.pop("split")
        info = study.SpecialLoader(method=self._load_raw, timeline=timeline).to_json()
        n_vols = self.N_VOLUMES_TRAIN if split == "train" else self.N_VOLUMES_TEST
        fmri = {
            "filepath": info,
            "type": "Fmri",
            "start": 0.0,
            "frequency": 1.0 / self.TR_FMRI_S,
            "duration": n_vols * self.TR_FMRI_S,
        }
        bids_events_df_fp = get_bids_filepath(
            root_path=self.path / "download",
            filetype="events",
            data_type="Fmri",
            run_padding="01",
            task=split,
            **tl,
        )
        bids_events_df = read_bids_events(bids_events_df_fp)

        bids_events_df = bids_events_df[bids_events_df.trial_type != "oddball"]
        ns_events_df = self._get_ns_img_events_df(bids_events_df, timeline)
        return pd.concat([pd.DataFrame([fmri]), ns_events_df], axis=0)

    def _load_raw(
        self, timeline: dict[str, tp.Any], space: str = "MNI152NLin2009cAsym"
    ) -> nibabel.Nifti2Image | nibabel.Nifti1Image:
        if space in ["MNI152NLin2009cAsym", "T1w"]:
            return get_masked_bold_image(*self._get_bold_images(timeline, space))
        elif space in ["fsnative", "fsaverage"]:
            return self._get_fs(timeline, space)
        msg = f"{space} is not supported."
        raise ValueError(msg)

    def _get_ns_img_events_df(
        self, bids_events_df: pd.DataFrame, timeline: dict[str, tp.Any]
    ) -> pd.DataFrame:
        path_to_stimuli = self.path / "stimuli/stimulus_set/stimuli"

        annot_path = (
            self.path
            / "download/derivatives/stimuli_metadata/llm_frame_annotations.json"
        )
        with annot_path.open("r", encoding="utf8") as f:
            middle_frame_captions = json.load(f)

        bids_events = bids_events_df.to_dict("records")
        ns_events = []
        for bids_event in bids_events:
            fp = Path(bids_event["stim_file"])
            filepath = str(path_to_stimuli / fp)
            captions = "\n".join(next(iter(middle_frame_captions[fp.stem].values())))
            ns_event = dict(
                type="Video",
                start=bids_event["onset"],
                filepath=filepath,
                middle_frame_captions=captions,
            )
            ns_events.append(ns_event)
        return pd.DataFrame(ns_events)

    def _get_bold_images(self, timeline: dict[str, tp.Any], space: str):
        timeline = dict(timeline)
        timeline["task"] = timeline.pop("split")
        kwargs = {
            "root_path": self.path / self.DERIVATIVES_FOLDER,
            "data_type": "Fmri",
            "space": space,
            "run_padding": "01",
            **timeline,
        }
        bold = nibabel.load(get_bids_filepath(**kwargs, filetype="bold"), mmap=True)
        mask = nibabel.load(
            get_bids_filepath(**kwargs, filetype="bold_mask"), mmap=True
        )
        return (bold, mask)

    def _get_fs(
        self, timeline: dict[str, tp.Any], space: str = "fsaverage"
    ) -> nibabel.Nifti2Image:
        tl = timeline
        if space not in ["fsaverage", "fsnative"]:
            msg = f"{space} is not supported. " "Only surfaces 'fsaverage' "
            msg += "and 'fsnative' are supported for Lahner2024Bold."
            raise ValueError(msg)

        data = []
        n_volumes = (
            self.N_VOLUMES_TRAIN if tl["split"] == "train" else self.N_VOLUMES_TEST
        )
        for hemi in ("L", "R"):
            fp = (
                self.path
                / self.DERIVATIVES_FOLDER
                / f"sub-{int(tl['subject']):02}/ses-{tl['session']:02}"
                / f"func/sub-{int(tl['subject']):02}_ses-{tl['session']:02}_task-{tl['split']}"
                f"_run-{tl['run']}_hemi-{hemi}_space-{space}_bold.func.gii"
            )
            hemi_data = nibabel.load(fp, mmap=True).darrays  # type: ignore
            if len(hemi_data) != n_volumes:
                msg = f"Expected {n_volumes} volumes, got {len(hemi_data)}"
                raise RuntimeError(msg)
            if space == "fsaverage" and hemi_data[0].data.shape != (163842,):
                msg = f"Expected shape (163842,), got {hemi_data[0].data.shape}"
                raise RuntimeError(msg)
            np_data = np.stack([darray.data for darray in hemi_data], -1)
            data.append(np_data)
        data = np.concatenate(data, axis=0)
        return nibabel.Nifti2Image(data, np.eye(4))
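A small sanity-check sketch (not part of the diff) showing the per-run durations implied by the class constants above, i.e. the same numbers used for the "duration" field in `_load_timeline_events`:

```python
from cortexlab.data.studies import Lahner2024Bold

tr = Lahner2024Bold.TR_FMRI_S                    # 1.75 s per volume
train_s = Lahner2024Bold.N_VOLUMES_TRAIN * tr    # 238 volumes -> 416.5 s per training run
test_s = Lahner2024Bold.N_VOLUMES_TEST * tr      # 268 volumes -> 469.0 s per test run
print(f"train run: {train_s} s, test run: {test_s} s")
```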
src/cortexlab/data/studies/lebel2023bold.py
ADDED
|
@@ -0,0 +1,344 @@
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
"""Natural language fMRI dataset: 3T fMRI responses to spoken narrative stories.
|
| 7 |
+
|
| 8 |
+
This dataset provides fMRI data from participants listening to natural spoken
|
| 9 |
+
narratives (stories) during 3T scanning. The stimuli include various narrative
|
| 10 |
+
audio stories with detailed word-level and phoneme-level annotations. The dataset
|
| 11 |
+
is designed for studying natural language processing in the brain.
|
| 12 |
+
|
| 13 |
+
Experimental Design:
|
| 14 |
+
- 3T fMRI recordings (TR = 2.0 seconds)
|
| 15 |
+
- 8 subjects (UTS01-UTS08)
|
| 16 |
+
- Subjects 1-3: 82 stories across 20 sessions (extended dataset)
|
| 17 |
+
- Subjects 4-8: 26-27 stories across 6 sessions
|
| 18 |
+
- Paradigm: passive listening to naturalistic spoken narratives
|
| 19 |
+
* Audio narratives with 10-second blank period before story onset
|
| 20 |
+
* Test story: "wheretheressmoke" (with 10 runs)
|
| 21 |
+
* Training stories: diverse narrative content
|
| 22 |
+
- Localizer tasks included: AudioMotorLocalizer, AuditoryLocalizer,
|
| 23 |
+
CategoryLocalizer, MotorLocalizer
|
| 24 |
+
|
| 25 |
+
Data Format:
|
| 26 |
+
- BIDS-compliant dataset structure (OpenNeuro ds003020)
|
| 27 |
+
- Two preprocessing versions available (see Study Classes below)
|
| 28 |
+
- Audio files: WAV format
|
| 29 |
+
- Event annotations (from TextGrid files)
|
| 30 |
+
* Word-level timing and text
|
| 31 |
+
* Phoneme-level timing and text
|
| 32 |
+
* Audio file paths
|
| 33 |
+
|
| 34 |
+
Study Classes:
|
| 35 |
+
1. **Lebel2023Bold**: Uses deepprep preprocessing pipeline
|
| 36 |
+
- Available spaces: T1w, MNI152NLin6Asym, fsaverage, fsnative
|
| 37 |
+
- 432 timelines (all sessions/runs)
|
| 38 |
+
- Full BIDS structure with multiple space outputs
|
| 39 |
+
|
| 40 |
+
2. **LebelProcessed2023Bold**: Uses authors' custom HDF5 preprocessing
|
| 41 |
+
- Custom cortical surface registration
|
| 42 |
+
- 200 timelines (aggregated by subject x task)
|
| 43 |
+
- Data stored in HDF5 format (.hf5 files)
|
| 44 |
+
- Custom voxel selection and masking
|
| 45 |
+
|
| 46 |
+
Download Requirements:
|
| 47 |
+
- OpenNeuro dataset: ds003020
|
| 48 |
+
- Dataset includes both raw fMRI data and preprocessed derivatives
|
| 49 |
+
- Audio stimuli (.wav files) and TextGrid annotations included
|
| 50 |
+
- Deepprep derivatives for Lebel2023Bold
|
| 51 |
+
- HDF5 preprocessed data for LebelProcessed2023Bold
|
| 52 |
+
- Python packages:
|
| 53 |
+
* nltk (v3.8.1) for TextGrid parsing
|
| 54 |
+
* nltk_contrib (from GitHub) for TextGrid file format
|
| 55 |
+
* soundfile (>=0.13.1) for audio handling
|
| 56 |
+
* h5py (>=3.10.0) for HDF5 files (LebelProcessed2023Bold only)
|
| 57 |
+
* pycortex (for cortical surface visualization, LebelProcessed2023Bold only)
|
| 58 |
+
|
| 59 |
+
Issues and Considerations:
|
| 60 |
+
- Subject UTS02: Different scan location and protocol, no localizer data
|
| 61 |
+
- Subject UTS04: Missing "life.hf5" story scan
|
| 62 |
+
- Subject UTS05: Low visual acuity, presented auditory cues
|
| 63 |
+
- UTS01/ses-7/treasureisland: Corrupted NIfTI file, automatically skipped
|
| 64 |
+
- Preprocessed data has an additional 20 s removed from the beginning
|
| 65 |
+
- Original preprocessing: https://github.com/HuthLab/deep-fMRI-dataset
|
| 66 |
+
"""
|
| 67 |
+
|
| 68 |
+
import logging
|
| 69 |
+
import typing as tp
|
| 70 |
+
from pathlib import Path
|
| 71 |
+
|
| 72 |
+
import numpy as np
|
| 73 |
+
import pandas as pd
|
| 74 |
+
from neuralset.events import study
|
| 75 |
+
|
| 76 |
+
logger = logging.getLogger(__name__)
|
| 77 |
+
|
| 78 |
+
_DEFAULT_BAD_WORDS = frozenset(
|
| 79 |
+
[
|
| 80 |
+
"sentence_start",
|
| 81 |
+
"sentence_end",
|
| 82 |
+
"br",
|
| 83 |
+
"lg",
|
| 84 |
+
"ls",
|
| 85 |
+
"ns",
|
| 86 |
+
"sp",
|
| 87 |
+
"{BR}",
|
| 88 |
+
"{LG}",
|
| 89 |
+
"{LS}",
|
| 90 |
+
"{NS}",
|
| 91 |
+
"{SP}",
|
| 92 |
+
]
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
_ANAT_TASKS = [
|
| 96 |
+
"AudioMotorLocalizer",
|
| 97 |
+
"AuditoryLocalizer",
|
| 98 |
+
"CategoryLocalizer",
|
| 99 |
+
"MotorLocalizer",
|
| 100 |
+
]
|
| 101 |
+
|
| 102 |
+
SUBJECTS = [f"UTS{i:02d}" for i in range(1, 9)]
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _get_audio_file(path: Path | str, task: str) -> Path:
|
| 106 |
+
path = Path(path)
|
| 107 |
+
return path / f"stimuli/{task}.wav"
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _get_audio_text_file(path: Path | str, task: str) -> Path:
|
| 111 |
+
path = Path(path)
|
| 112 |
+
return path / f"derivative/TextGrids/{task}.TextGrid"
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _create_audio_events(path: Path | str, task: str) -> list[dict]:
|
| 116 |
+
events = []
|
| 117 |
+
dl_path = Path(path)
|
| 118 |
+
audio_text_file_name = _get_audio_text_file(dl_path, task)
|
| 119 |
+
audio_wav_file_name = _get_audio_file(dl_path, task)
|
| 120 |
+
|
| 121 |
+
split = "train" if task != "wheretheressmoke" else "test"
|
| 122 |
+
|
| 123 |
+
events.append(
|
| 124 |
+
dict(
|
| 125 |
+
start=0.0,
|
| 126 |
+
type="Audio",
|
| 127 |
+
language="english",
|
| 128 |
+
filepath=str(audio_wav_file_name),
|
| 129 |
+
split=split,
|
| 130 |
+
)
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
from nltk_contrib.textgrid import TextGrid
|
| 134 |
+
|
| 135 |
+
data = audio_text_file_name.read_text(encoding="utf-8")
|
| 136 |
+
fid = TextGrid(data)
|
| 137 |
+
|
| 138 |
+
for _, tier in enumerate(fid):
|
| 139 |
+
for recording in tier.simple_transcript:
|
| 140 |
+
start, stop, text = recording
|
| 141 |
+
if text != "" and text not in _DEFAULT_BAD_WORDS:
|
| 142 |
+
if tier.nameid == "phone":
|
| 143 |
+
tier_type = "Phoneme"
|
| 144 |
+
elif tier.nameid == "word":
|
| 145 |
+
tier_type = "Word"
|
| 146 |
+
else:
|
| 147 |
+
msg = "Tier must either be phone or word but tier.nameid is %s"
|
| 148 |
+
logger.warning(msg, tier.nameid)
continue  # skip unknown tiers rather than reusing a stale tier_type
|
| 149 |
+
events.append(
|
| 150 |
+
dict(
|
| 151 |
+
start=float(start),
|
| 152 |
+
text=text.lower(),
|
| 153 |
+
duration=float(stop) - float(start),
|
| 154 |
+
type=tier_type,
|
| 155 |
+
language="english",
|
| 156 |
+
filepath=str(audio_wav_file_name),
|
| 157 |
+
split=split,
|
| 158 |
+
)
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
return events
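# Sketch of the returned rows (values illustrative): one "Audio" event for the
# whole story, plus one row per kept word/phoneme from the TextGrid tiers:
#
#     [{"start": 0.0, "type": "Audio", "language": "english", "filepath": "...", "split": "train"},
#      {"start": 10.3, "text": "once", "duration": 0.21, "type": "Word", ...},
#      {"start": 10.3, "text": "w", "duration": 0.07, "type": "Phoneme", ...},
#      ...]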
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def _get_preprocessed_responses(
|
| 165 |
+
path: Path | str, task: str, subject: str
|
| 166 |
+
) -> np.ndarray:
|
| 167 |
+
output = _get_response(Path(path), [task], subject)
|
| 168 |
+
return output
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def _get_hf5_path(path: Path | str, subject: str, task: str) -> Path | None:
|
| 172 |
+
path = Path(path).resolve()
|
| 173 |
+
hf5_path = path / "derivative" / "preprocessed_data" / subject / f"{task}.hf5"
|
| 174 |
+
if hf5_path.exists():
|
| 175 |
+
return hf5_path
|
| 176 |
+
return None
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _get_tasks(path: Path) -> list[str]:
|
| 180 |
+
path = Path(path).resolve()
|
| 181 |
+
dl_path = path / "stimuli"
|
| 182 |
+
tasks = []
|
| 183 |
+
for fp in dl_path.glob("*.wav"):
|
| 184 |
+
tasks.append(fp.stem)
|
| 185 |
+
return tasks
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def _get_response(path: Path | str, stories, subject) -> np.ndarray:
|
| 189 |
+
"""Get the subject"s fMRI response for stories."""
|
| 190 |
+
import h5py
|
| 191 |
+
|
| 192 |
+
path = Path(path).resolve()
|
| 193 |
+
base_path = path / f"download/ds003020/derivative/preprocessed_data/{subject}"
|
| 194 |
+
resp = []
|
| 195 |
+
for story in stories:
|
| 196 |
+
resp_path = base_path / f"{story}.hf5"
|
| 197 |
+
hf = h5py.File(resp_path, "r")
|
| 198 |
+
resp.extend(hf["data"][:])
|
| 199 |
+
hf.close()
|
| 200 |
+
return np.array(resp)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class Lebel2023Bold(study.Study):
|
| 204 |
+
device: tp.ClassVar[str] = "Fmri"
|
| 205 |
+
licence: tp.ClassVar[str] = "CC0"
|
| 206 |
+
description: tp.ClassVar[str] = (
|
| 207 |
+
"Natural language fMRI: 3T fMRI responses from 8 subjects listening to "
|
| 208 |
+
"spoken narrative stories. Deepprep preprocessing with multiple output spaces "
|
| 209 |
+
"(T1w, MNI152NLin6Asym, fsaverage, fsnative). 432 timelines with word and "
|
| 210 |
+
"phoneme-level annotations. Test story: 'wheretheressmoke'."
|
| 211 |
+
)
|
| 212 |
+
bibtex: tp.ClassVar[
|
| 213 |
+
str
|
| 214 |
+
] = """
|
| 215 |
+
@article{lebel2023natural,
|
| 216 |
+
title={A natural language fMRI dataset for voxelwise encoding models},
|
| 217 |
+
author={LeBel, Amanda and Wagner, Lauren and Jain, Shailee and Adhikari-Desai, Aneesh and Gupta, Bhavin and Morgenthal, Allyson and Tang, Jerry and Xu, Lixiang and Huth, Alexander G},
|
| 218 |
+
journal={Scientific Data},
|
| 219 |
+
volume={10},
|
| 220 |
+
number={1},
|
| 221 |
+
pages={555},
|
| 222 |
+
year={2023},
|
| 223 |
+
publisher={Nature Publishing Group UK London},
|
| 224 |
+
doi={https://doi.org/10.1038/s41597-023-02437-z},
|
| 225 |
+
url={https://www.nature.com/articles/s41597-023-02437-z}
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
@dataset{lebel2023bold,
|
| 229 |
+
title={A natural language fMRI dataset for voxelwise encoding models},
|
| 230 |
+
author={LeBel, Amanda and Wagner, Lauren and Jain, Shailee and Adhikari-Desai, Aneesh and
|
| 231 |
+
Gupta, Bhavin and Morgenthal, Alyssa and Tang, Jerry and Xu, Lixiang and Huth, Alexander G},
|
| 232 |
+
year={2023},
|
| 233 |
+
publisher={OpenNeuro},
|
| 234 |
+
doi={10.18112/openneuro.ds003020.v2.2.0},
|
| 235 |
+
url={https://openneuro.org/datasets/ds003020}
|
| 236 |
+
}
|
| 237 |
+
"""
|
| 238 |
+
requirements: tp.ClassVar[tuple[str, ...]] = (
|
| 239 |
+
"nltk==3.8.1",
|
| 240 |
+
"git+https://github.com/nltk/nltk_contrib.git@683961c53f0c122b90fe2d039fe795e0a2b3e997",
|
| 241 |
+
"soundfile>=0.13.1",
|
| 242 |
+
)
|
| 243 |
+
_info: tp.ClassVar[study.StudyInfo] = study.StudyInfo(
|
| 244 |
+
num_timelines=432,
|
| 245 |
+
num_subjects=8,
|
| 246 |
+
num_events_in_query=9199,
|
| 247 |
+
event_types_in_query={"Fmri", "Audio", "Word", "Phoneme"},
|
| 248 |
+
data_shape=(57, 65, 56, 363),
|
| 249 |
+
frequency=0.5,
|
| 250 |
+
fmri_spaces=("T1w", "MNI152NLin6Asym", "fsaverage", "fsnative"),
|
| 251 |
+
)
|
| 252 |
+
TR_FMRI_S: tp.ClassVar[float] = 2.0
|
| 253 |
+
DERIVATIVES_FOLDER: tp.ClassVar[str] = "download/ds003020-fmriprep"
|
| 254 |
+
|
| 255 |
+
def model_post_init(self, __context: tp.Any) -> None:
|
| 256 |
+
super().model_post_init(__context)
|
| 257 |
+
self.infra_timelines.version = "v3.4"
|
| 258 |
+
|
| 259 |
+
def _download(self) -> None:
|
| 260 |
+
raise NotImplementedError("Download method not implemented yet")
|
| 261 |
+
|
| 262 |
+
def iter_timelines(self) -> tp.Iterator[dict[str, tp.Any]]:
|
| 263 |
+
"""
|
| 264 |
+
Iterate over the different recording timelines:
|
| 265 |
+
e.g. subjects x sessions in order with fmri runs
|
| 266 |
+
"""
|
| 267 |
+
dl_dir = self.path / "download/ds003020"
|
| 268 |
+
if not dl_dir.exists():
|
| 269 |
+
raise RuntimeError(f"Missing folder {dl_dir}")
|
| 270 |
+
|
| 271 |
+
for subject in SUBJECTS:
|
| 272 |
+
sessions = 20 if subject in ["UTS01", "UTS02", "UTS03"] else 6
|
| 273 |
+
|
| 274 |
+
for sess in range(1, sessions + 1):
|
| 275 |
+
sess_dir = dl_dir / f"sub-{subject}" / f"ses-{sess}" / "func"
|
| 276 |
+
tasks = [task.name for task in sess_dir.glob("*_bold.nii.gz")]
|
| 277 |
+
tasks = sorted({task.split("_")[2].split("-")[1] for task in tasks})
|
| 278 |
+
for task in tasks:
|
| 279 |
+
if task.startswith(tuple(_ANAT_TASKS)):
|
| 280 |
+
continue
|
| 281 |
+
if subject == "UTS01" and sess == 7 and task == "treasureisland":
|
| 282 |
+
msg = "Skipping subject=UTS01, session=7, task=treasureisland as nii.gz is corrupted."
|
| 283 |
+
logger.warning(msg)
|
| 284 |
+
continue
|
| 285 |
+
|
| 286 |
+
runs = (
|
| 287 |
+
list(range(1, 11)) + [None]
|
| 288 |
+
if task == "wheretheressmoke"
|
| 289 |
+
else [None]
|
| 290 |
+
)
|
| 291 |
+
for run in runs:
|
| 292 |
+
run_infix = f"_run-{run}" if run is not None else ""
|
| 293 |
+
filename = f"sub-{subject}_ses-{sess}_task-{task}{run_infix}_bold.nii.gz"
|
| 294 |
+
bids_path = sess_dir / filename
|
| 295 |
+
if not bids_path.exists():
|
| 296 |
+
continue
|
| 297 |
+
|
| 298 |
+
audio_text_file = _get_audio_text_file(path=dl_dir, task=task)
|
| 299 |
+
if not audio_text_file.exists():
|
| 300 |
+
raise RuntimeError(
|
| 301 |
+
f"Missing audio text file: {audio_text_file}"
|
| 302 |
+
)
|
| 303 |
+
audio_file = _get_audio_file(path=dl_dir, task=task)
|
| 304 |
+
if not audio_file.exists():
|
| 305 |
+
raise RuntimeError(f"Missing audio file: {audio_file}")
|
| 306 |
+
|
| 307 |
+
yield dict(
|
| 308 |
+
subject=subject, session=str(sess), task=task, run=run
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
def _load_timeline_events(self, timeline: dict[str, tp.Any]) -> pd.DataFrame:
|
| 312 |
+
"""Reads the events of a given timeline"""
|
| 313 |
+
|
| 314 |
+
task = timeline["task"]
|
| 315 |
+
freq = 1.0 / self.TR_FMRI_S
|
| 316 |
+
events = _create_audio_events(self.path / "download/ds003020", task)
|
| 317 |
+
subject, session, task, run = (
|
| 318 |
+
timeline["subject"],
|
| 319 |
+
timeline["session"],
|
| 320 |
+
timeline["task"],
|
| 321 |
+
timeline["run"],
|
| 322 |
+
)
|
| 323 |
+
run_substr = f"_run-{run}" if run is not None else ""
|
| 324 |
+
fp = (
|
| 325 |
+
self.path
|
| 326 |
+
/ self.DERIVATIVES_FOLDER
|
| 327 |
+
/ f"sub-{subject}/ses-{session}/func"
|
| 328 |
+
/ f"sub-{subject}_ses-{session}_task-{task}{run_substr}_*"
|
| 329 |
+
)
|
| 330 |
+
events.append(
|
| 331 |
+
dict(
|
| 332 |
+
type="Fmri",
|
| 333 |
+
start=0.0,
|
| 334 |
+
filepath=fp,
|
| 335 |
+
layout="fmriprep",
|
| 336 |
+
frequency=freq,
|
| 337 |
+
split="train" if task != "wheretheressmoke" else "test",
|
| 338 |
+
)
|
| 339 |
+
)
|
| 340 |
+
out = pd.DataFrame(events)
|
| 341 |
+
out.loc[out.type != "Fmri", "start"] += 10
|
| 342 |
+
out["task"] = task
|
| 343 |
+
out.loc[out.type != "Fmri", "modality"] = "heard"
|
| 344 |
+
return out
|
src/cortexlab/data/studies/wen2017.py
ADDED
|
@@ -0,0 +1,78 @@
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import typing as tp
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
import pandas as pd
|
| 11 |
+
from neuralset.events import study
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _get_nii_file(path: Path | str, subject: str, seg: str, fmri_run: int) -> Path:
|
| 15 |
+
path = Path(path)
|
| 16 |
+
seg_dir = path / subject / "fmri" / seg
|
| 17 |
+
nii = seg_dir / "mni" / f"{seg}_{fmri_run}_mni.nii.gz"
|
| 18 |
+
# Outrageously, some test files have a different
|
| 19 |
+
# naming convention...
|
| 20 |
+
if not nii.exists():
|
| 21 |
+
nii = seg_dir / "mni" / f"{seg}_{fmri_run}.mni.nii.gz"
|
| 22 |
+
assert nii.exists(), f"Missing file {nii} for {subject!r} and {seg!r}"
|
| 23 |
+
return nii
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _get_video_file(path: Path | str, seg: str) -> Path:
|
| 27 |
+
path = Path(path)
|
| 28 |
+
return path / f"stimuli/{seg}.mp4"
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Wen2017(study.Study):
|
| 32 |
+
device: tp.ClassVar[str] = "Fmri"
|
| 33 |
+
licence: tp.ClassVar[str] = "CC-BY 0"
|
| 34 |
+
url: tp.ClassVar[str] = "https://academic.oup.com/cercor/article/28/12/4136/4560155"
|
| 35 |
+
TR_FMRI_S: tp.ClassVar[float] = 2.0 # don't rely on nifti header
|
| 36 |
+
|
| 37 |
+
def _download(self) -> None:
|
| 38 |
+
raise NotImplementedError("Download method not implemented yet")
|
| 39 |
+
|
| 40 |
+
def iter_timelines(self) -> tp.Iterator[dict[str, tp.Any]]:
|
| 41 |
+
base = self.path / "download" / "video_fmri_dataset"
|
| 42 |
+
for subject_dir in base.iterdir():
|
| 43 |
+
subject = subject_dir.name
|
| 44 |
+
if not subject.startswith("subject") or not subject_dir.is_dir():
|
| 45 |
+
continue
|
| 46 |
+
|
| 47 |
+
for seg_dir in (subject_dir / "fmri").iterdir():
|
| 48 |
+
seg = seg_dir.name
|
| 49 |
+
is_train = seg.startswith("seg")
|
| 50 |
+
is_test = seg.startswith("test")
|
| 51 |
+
if not (is_train or is_test):
|
| 52 |
+
continue
|
| 53 |
+
file = _get_video_file(base, seg)
|
| 54 |
+
if not file.exists():
|
| 55 |
+
raise FileNotFoundError(f"Missing video file: {file}")
|
| 56 |
+
|
| 57 |
+
fmri_runs = range(1, 3) if is_train else range(1, 11)
|
| 58 |
+
for run_ in fmri_runs:
|
| 59 |
+
nii = _get_nii_file(base, subject, seg, run_)
|
| 60 |
+
if not nii.exists():
|
| 61 |
+
raise FileNotFoundError(f"Missing nii file: {nii}")
|
| 62 |
+
|
| 63 |
+
yield dict(subject=subject, seg=seg, run=run_)
|
| 64 |
+
|
| 65 |
+
def _load_timeline_events(self, timeline: dict[str, tp.Any]) -> pd.DataFrame:
|
| 66 |
+
import nibabel
|
| 67 |
+
|
| 68 |
+
tl = timeline
|
| 69 |
+
base = self.path / "download" / "video_fmri_dataset"
|
| 70 |
+
video_file = _get_video_file(base, tl["seg"])
|
| 71 |
+
nii_file = _get_nii_file(base, tl["subject"], tl["seg"], tl["run"])
|
| 72 |
+
nii: tp.Any = nibabel.load(nii_file, mmap=True)
|
| 73 |
+
freq = 1.0 / self.TR_FMRI_S
|
| 74 |
+
dur = nii.shape[-1] / freq
|
| 75 |
+
fmri = dict(
|
| 76 |
+
type="Fmri", start=0, filepath=nii_file, frequency=freq, duration=dur
|
| 77 |
+
)
|
| 78 |
+
return pd.DataFrame([dict(type="Video", start=0, filepath=video_file), fmri])
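# Sketch of the resulting DataFrame (values illustrative): one stimulus row and
# one recording row per timeline,
#
#     type   start  filepath            frequency  duration
#     Video  0      stimuli/seg1.mp4    NaN        NaN
#     Fmri   0      seg1_1_mni.nii.gz   0.5        490.0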
|
src/cortexlab/data/transforms.py
ADDED
|
@@ -0,0 +1,273 @@
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import contextlib
|
| 8 |
+
import copy
|
| 9 |
+
import logging
|
| 10 |
+
import os
|
| 11 |
+
import typing as tp
|
| 12 |
+
import warnings
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
|
| 15 |
+
import exca
|
| 16 |
+
import neuralset.events.etypes as ev
|
| 17 |
+
import pandas as pd
|
| 18 |
+
import torch
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
from neuralset.events.transforms import EventsTransform
|
| 22 |
+
from neuralset.events.transforms.utils import DeterministicSplitter
|
| 23 |
+
from tqdm import tqdm
|
| 24 |
+
|
| 25 |
+
SPLIT_ATTRIBUTES = {
|
| 26 |
+
"Algonauts2025Bold": "chunk",
|
| 27 |
+
"Algonauts2025": "chunk",
|
| 28 |
+
"Lebel2023Bold": "task",
|
| 29 |
+
"Nastase2020": "story",
|
| 30 |
+
"Wen2017": "seg",
|
| 31 |
+
"Wenvtwo2017": "run",
|
| 32 |
+
"Lahner2024Bold": "timeline",
|
| 33 |
+
"Vanessen2023": "run",
|
| 34 |
+
"Aliko2020": "task",
|
| 35 |
+
"Li2022": "run",
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def assign_splits(
|
| 40 |
+
events: pd.DataFrame, splitter: tp.Callable[[str], str]
|
| 41 |
+
) -> pd.DataFrame:
|
| 42 |
+
assert events.study.nunique() == 1, "Only one study can be assigned at a time"
|
| 43 |
+
study_name = events.study.unique()[0]
|
| 44 |
+
split_by = SPLIT_ATTRIBUTES[study_name]
|
| 45 |
+
events["split_attr"] = events[split_by].astype(str)
|
| 46 |
+
values = events["split_attr"].unique()
|
| 47 |
+
# check that all rows have split attr assigned
|
| 48 |
+
unassigned_event_types = events[events.split_attr.isna()].type.unique().tolist()
|
| 49 |
+
if len(unassigned_event_types) > 0:
|
| 50 |
+
msg = f"Study {study_name}: The following events do not have a split assigned and will be removed: {unassigned_event_types}"
|
| 51 |
+
if any(
|
| 52 |
+
[
|
| 53 |
+
name.capitalize() in unassigned_event_types
|
| 54 |
+
for name in ["Fmri", "Video", "Audio", "Word"]
|
| 55 |
+
]
|
| 56 |
+
):
|
| 57 |
+
raise ValueError(msg)
|
| 58 |
+
else:
|
| 59 |
+
events = events[~events.type.isin(unassigned_event_types)]
|
| 60 |
+
warnings.warn(msg)
|
| 61 |
+
splits = [splitter(value) for value in values]
|
| 62 |
+
if splits and "val" not in splits:
|
| 63 |
+
splits[-1] = "val" # need at least one val split
|
| 64 |
+
val_to_split = dict(zip(values, splits))
|
| 65 |
+
events["split"] = events["split_attr"].map(val_to_split)
|
| 66 |
+
return events
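# Toy sketch: with SPLIT_ATTRIBUTES["Wen2017"] == "seg", every unique segment is
# mapped to a deterministic split, and the last value is forced to "val" when the
# draw produced none:
#
#     splitter = DeterministicSplitter(ratios={"train": 0.9, "val": 0.1}, seed=42)
#     df = pd.DataFrame(
#         {"study": ["Wen2017"] * 3, "type": ["Fmri"] * 3, "seg": ["seg1", "seg2", "test1"]}
#     )
#     assign_splits(df, splitter)[["seg", "split"]]  # each row now labelled "train" or "val"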
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class SplitEvents(EventsTransform):
|
| 70 |
+
val_ratio: float
|
| 71 |
+
|
| 72 |
+
def _run(self, events: pd.DataFrame) -> pd.DataFrame:
|
| 73 |
+
|
| 74 |
+
splitter = DeterministicSplitter(
|
| 75 |
+
ratios={"train": 1 - self.val_ratio, "val": self.val_ratio}, seed=42
|
| 76 |
+
)
|
| 77 |
+
tmp = []
|
| 78 |
+
for _, study_events in events.groupby("study"):
|
| 79 |
+
study_events = assign_splits(study_events, splitter)
|
| 80 |
+
tmp.append(study_events)
|
| 81 |
+
events = pd.concat(tmp)
|
| 82 |
+
|
| 83 |
+
return events
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class ExtractWordsFromAudio(EventsTransform):
|
| 87 |
+
"""
|
| 88 |
+
Language is hard-coded because auto-detection is performed on the first 30 s of audio, which can be empty, e.g. for movies.
|
| 89 |
+
"""
|
| 90 |
+
|
| 91 |
+
language: str = "english"
|
| 92 |
+
overwrite: bool = False
|
| 93 |
+
|
| 94 |
+
@staticmethod
|
| 95 |
+
def _get_transcript_from_audio(wav_filename: Path, language: str) -> pd.DataFrame:
|
| 96 |
+
import json
|
| 97 |
+
import os
|
| 98 |
+
import subprocess
|
| 99 |
+
import tempfile
|
| 100 |
+
|
| 101 |
+
language_codes = dict(
|
| 102 |
+
english="en", french="fr", spanish="es", dutch="nl", chinese="zh"
|
| 103 |
+
)
|
| 104 |
+
if language not in language_codes:
|
| 105 |
+
raise ValueError(f"Language {language} not supported")
|
| 106 |
+
|
| 107 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 108 |
+
compute_type = "float16"
|
| 109 |
+
|
| 110 |
+
with tempfile.TemporaryDirectory() as output_dir:
|
| 111 |
+
logger.info("Running whisperx via uvx...")
|
| 112 |
+
cmd = [
|
| 113 |
+
"uvx",
|
| 114 |
+
"whisperx",
|
| 115 |
+
str(wav_filename),
|
| 116 |
+
"--model",
|
| 117 |
+
"large-v3",
|
| 118 |
+
"--language",
|
| 119 |
+
language_codes[language],
|
| 120 |
+
"--device",
|
| 121 |
+
device,
|
| 122 |
+
"--compute_type",
|
| 123 |
+
compute_type,
|
| 124 |
+
"--batch_size",
|
| 125 |
+
"16",
|
| 126 |
+
"--align_model",
|
| 127 |
+
"WAV2VEC2_ASR_LARGE_LV60K_960H" if language == "english" else "",
|
| 128 |
+
"--output_dir",
|
| 129 |
+
output_dir,
|
| 130 |
+
"--output_format",
|
| 131 |
+
"json",
|
| 132 |
+
]
|
| 133 |
+
cmd = [c for c in cmd if c] # remove empty args
|
| 134 |
+
env = {k: v for k, v in os.environ.items() if k != "MPLBACKEND"}
|
| 135 |
+
result = subprocess.run(cmd, capture_output=True, text=True, env=env)
|
| 136 |
+
if result.returncode != 0:
|
| 137 |
+
raise RuntimeError(f"whisperx failed:\n{result.stderr}")
|
| 138 |
+
|
| 139 |
+
json_path = Path(output_dir) / f"{wav_filename.stem}.json"
|
| 140 |
+
transcript = json.loads(json_path.read_text())
|
| 141 |
+
|
| 142 |
+
words = []
|
| 143 |
+
for i, segment in enumerate(transcript["segments"]):
|
| 144 |
+
sentence = segment["text"]
|
| 145 |
+
sentence = sentence.replace('"', "")
|
| 146 |
+
for word in segment["words"]:
|
| 147 |
+
if "start" not in word:
|
| 148 |
+
continue
|
| 149 |
+
word_dict = {
|
| 150 |
+
"text": word["word"].replace('"', ""),
|
| 151 |
+
"start": word["start"],
|
| 152 |
+
"duration": word["end"] - word["start"],
|
| 153 |
+
"sequence_id": i,
|
| 154 |
+
"sentence": sentence,
|
| 155 |
+
}
|
| 156 |
+
words.append(word_dict)
|
| 157 |
+
|
| 158 |
+
transcript = pd.DataFrame(words)
|
| 159 |
+
return transcript
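# Sketch of the transcript DataFrame produced above (values illustrative):
# one row per aligned word, with columns
#
#     text     start  duration  sequence_id  sentence
#     "hello"  0.42   0.31      0            "Hello there."
#     "there"  0.75   0.28      0            "Hello there."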
|
| 160 |
+
|
| 161 |
+
def _run(self, events: pd.DataFrame) -> pd.DataFrame:
|
| 162 |
+
if "Word" in events.type.unique():
|
| 163 |
+
logger.warning("Words already present in the events dataframe, skipping")
|
| 164 |
+
return events
|
| 165 |
+
audio_events = events.loc[events.type == "Audio"]
|
| 166 |
+
transcripts = {}
|
| 167 |
+
for wav_filename in tqdm(
|
| 168 |
+
audio_events.filepath.unique(),
|
| 169 |
+
total=len(audio_events.filepath.unique()),
|
| 170 |
+
desc="Extracting words from audio",
|
| 171 |
+
):
|
| 172 |
+
wav_filename = Path(wav_filename)
|
| 173 |
+
transcript_filename = wav_filename.with_suffix(".tsv")
|
| 174 |
+
if transcript_filename.exists() and not self.overwrite:
|
| 175 |
+
try:
|
| 176 |
+
transcript = pd.read_csv(transcript_filename, sep="\t")
|
| 177 |
+
except pd.errors.EmptyDataError:
|
| 178 |
+
transcript = pd.DataFrame()
|
| 179 |
+
logger.warning(f"Empty transcript file {transcript_filename}")
|
| 180 |
+
else:
|
| 181 |
+
transcript = self._get_transcript_from_audio(
|
| 182 |
+
wav_filename, self.language
|
| 183 |
+
)
|
| 184 |
+
transcript.to_csv(transcript_filename, sep="\t", index=False)
|
| 185 |
+
logger.info(f"Wrote transcript to {transcript_filename}")
|
| 186 |
+
transcripts[str(wav_filename)] = transcript
|
| 187 |
+
all_transcripts = []
|
| 188 |
+
for audio_event in audio_events.itertuples():
|
| 189 |
+
transcript = copy.deepcopy(transcripts[audio_event.filepath])
|
| 190 |
+
if len(transcript) == 0:
|
| 191 |
+
continue
|
| 192 |
+
for k, v in audio_event._asdict().items():
|
| 193 |
+
if k in (
|
| 194 |
+
"frequency",
|
| 195 |
+
"filepath",
|
| 196 |
+
"type",
|
| 197 |
+
"start",
|
| 198 |
+
"duration",
|
| 199 |
+
"offset",
|
| 200 |
+
):
|
| 201 |
+
continue
|
| 202 |
+
transcript.loc[:, k] = v
|
| 203 |
+
transcript["type"] = "Word"
|
| 204 |
+
transcript["language"] = self.language
|
| 205 |
+
transcript["start"] += audio_event.start + audio_event.offset
|
| 206 |
+
all_transcripts.append(transcript)
|
| 207 |
+
|
| 208 |
+
if all_transcripts:
|
| 209 |
+
events = pd.concat([events, pd.concat(all_transcripts)], ignore_index=True)
|
| 210 |
+
else:
|
| 211 |
+
logger.warning("No transcripts found, skipping")
|
| 212 |
+
return events
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
class CreateVideosFromImages(EventsTransform):
|
| 216 |
+
fps: int = 10
|
| 217 |
+
remove_images: bool = True
|
| 218 |
+
infra: exca.MapInfra = exca.MapInfra(cluster="processpool")
|
| 219 |
+
|
| 220 |
+
@infra.apply(
|
| 221 |
+
item_uid=lambda image_event: f"{image_event.filepath}_{image_event.duration}"
|
| 222 |
+
)
|
| 223 |
+
def create_video(self, image_events: list[ev.Image]) -> tp.Iterator[ev.Video]:
|
| 224 |
+
for image_event in image_events:
|
| 225 |
+
image_filepath = Path(image_event.filepath)
|
| 226 |
+
video_filepath = (
|
| 227 |
+
Path(self.infra.uid_folder(create=True))
|
| 228 |
+
/ f"{image_filepath.stem}_{image_event.duration}.mp4"
|
| 229 |
+
)
|
| 230 |
+
from moviepy import ImageClip
|
| 231 |
+
|
| 232 |
+
video_filepath.parent.mkdir(parents=True, exist_ok=True)
|
| 233 |
+
clip = ImageClip(str(image_filepath), duration=image_event.duration)
|
| 234 |
+
with (
|
| 235 |
+
open(os.devnull, "w") as devnull,
|
| 236 |
+
contextlib.redirect_stdout(devnull),
|
| 237 |
+
contextlib.redirect_stderr(devnull),
|
| 238 |
+
):
|
| 239 |
+
clip.write_videofile(
|
| 240 |
+
video_filepath, codec="libx264", audio=False, fps=self.fps
|
| 241 |
+
)
|
| 242 |
+
video_event = ev.Video.from_dict(
|
| 243 |
+
image_event.to_dict()
|
| 244 |
+
| {
|
| 245 |
+
"type": "Video",
|
| 246 |
+
"filepath": str(video_filepath),
|
| 247 |
+
"frequency": self.fps,
|
| 248 |
+
}
|
| 249 |
+
)
|
| 250 |
+
yield video_event
|
| 251 |
+
|
| 252 |
+
def _run(self, events: pd.DataFrame) -> pd.DataFrame:
|
| 253 |
+
images = events.loc[events.type == "Image"]
|
| 254 |
+
image_events = []
|
| 255 |
+
for image in tqdm(
|
| 256 |
+
images.itertuples(), total=len(images), desc="Extracting image events"
|
| 257 |
+
):
|
| 258 |
+
image_events.append(ev.Image.from_dict(image._asdict()))
|
| 259 |
+
video_events = [
|
| 260 |
+
video_event.to_dict() for video_event in self.create_video(image_events)
|
| 261 |
+
]
|
| 262 |
+
events = pd.concat([events, pd.DataFrame(video_events)], ignore_index=True)
|
| 263 |
+
if self.remove_images:
|
| 264 |
+
events = events.loc[events.type != "Image"]
|
| 265 |
+
return events.reset_index(drop=True)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
class RemoveDuplicates(EventsTransform):
|
| 269 |
+
subset: str | tp.Sequence[str] = "filepath"
|
| 270 |
+
|
| 271 |
+
def _run(self, events: pd.DataFrame) -> pd.DataFrame:
|
| 272 |
+
events = events.drop_duplicates(subset=self.subset)
|
| 273 |
+
return events
|
src/cortexlab/inference/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
| 1 |
+
from cortexlab.inference.attribution import ModalityAttributor
|
| 2 |
+
from cortexlab.inference.streaming import StreamingPredictor
|
| 3 |
+
|
| 4 |
+
__all__ = ["ModalityAttributor", "StreamingPredictor"]
|
src/cortexlab/inference/attribution.py
ADDED
|
@@ -0,0 +1,93 @@
|
| 1 |
+
"""Modality importance scoring via ablation.
|
| 2 |
+
|
| 3 |
+
:class:`ModalityAttributor` measures how much text, audio, and video
|
| 4 |
+
each contribute to the predicted brain response at every vertex by
|
| 5 |
+
comparing the full prediction against predictions with each modality
|
| 6 |
+
zeroed out.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import torch
|
| 15 |
+
from neuralset.dataloader import SegmentData
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class ModalityAttributor:
|
| 21 |
+
"""Score per-vertex importance of each input modality.
|
| 22 |
+
|
| 23 |
+
Uses an ablation approach: for each modality, zero out its features
|
| 24 |
+
and measure the change in predicted brain activation. Larger changes
|
| 25 |
+
mean the modality matters more for that vertex.
|
| 26 |
+
|
| 27 |
+
Parameters
|
| 28 |
+
----------
|
| 29 |
+
model : torch.nn.Module
|
| 30 |
+
A :class:`FmriEncoderModel` instance.
|
| 31 |
+
roi_indices : dict[str, np.ndarray], optional
|
| 32 |
+
If provided, also compute per-ROI summary scores.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
def __init__(
|
| 36 |
+
self,
|
| 37 |
+
model: torch.nn.Module,
|
| 38 |
+
roi_indices: dict[str, np.ndarray] | None = None,
|
| 39 |
+
):
|
| 40 |
+
self.model = model
|
| 41 |
+
self.roi_indices = roi_indices
|
| 42 |
+
|
| 43 |
+
def attribute(self, batch: SegmentData) -> dict[str, np.ndarray]:
|
| 44 |
+
"""Compute modality importance scores for a single batch.
|
| 45 |
+
|
| 46 |
+
Parameters
|
| 47 |
+
----------
|
| 48 |
+
batch : SegmentData
|
| 49 |
+
Input batch containing features for all modalities.
|
| 50 |
+
|
| 51 |
+
Returns
|
| 52 |
+
-------
|
| 53 |
+
dict[str, np.ndarray]
|
| 54 |
+
Keys are modality names (e.g. ``"text"``, ``"audio"``,
|
| 55 |
+
``"video"``) mapped to importance arrays of shape
|
| 56 |
+
``(n_vertices,)``. If *roi_indices* was provided, additional
|
| 57 |
+
keys like ``"text_roi"`` map ROI names to scalar scores.
|
| 58 |
+
"""
|
| 59 |
+
self.model.eval()
|
| 60 |
+
modalities = [m for m in self.model.feature_dims if m in batch.data]
|
| 61 |
+
|
| 62 |
+
with torch.inference_mode():
|
| 63 |
+
baseline = self.model(batch).detach() # (B, V, T)
|
| 64 |
+
baseline_mean = baseline.mean(dim=(0, 2)).cpu().numpy() # (V,)
|
| 65 |
+
|
| 66 |
+
scores: dict[str, np.ndarray] = {}
|
| 67 |
+
|
| 68 |
+
for mod in modalities:
|
| 69 |
+
ablated_data = {k: v.clone() for k, v in batch.data.items()}
|
| 70 |
+
ablated_data[mod] = torch.zeros_like(ablated_data[mod])
|
| 71 |
+
ablated_batch = SegmentData(data=ablated_data, segments=batch.segments)
|
| 72 |
+
|
| 73 |
+
with torch.inference_mode():
|
| 74 |
+
ablated_pred = self.model(ablated_batch).detach()
|
| 75 |
+
ablated_mean = ablated_pred.mean(dim=(0, 2)).cpu().numpy()
|
| 76 |
+
|
| 77 |
+
importance = np.abs(baseline_mean - ablated_mean)
|
| 78 |
+
scores[mod] = importance
|
| 79 |
+
|
| 80 |
+
if self.roi_indices is not None:
|
| 81 |
+
roi_scores = {}
|
| 82 |
+
for roi_name, vertices in self.roi_indices.items():
|
| 83 |
+
valid = vertices[vertices < len(importance)]
|
| 84 |
+
roi_scores[roi_name] = float(importance[valid].mean()) if len(valid) > 0 else 0.0
|
| 85 |
+
scores[f"{mod}_roi"] = roi_scores
|
| 86 |
+
|
| 87 |
+
# Normalise so modality scores sum to 1 at each vertex
|
| 88 |
+
total = sum(scores[m] for m in modalities)
|
| 89 |
+
total = np.where(total > 0, total, 1.0)
|
| 90 |
+
for mod in modalities:
|
| 91 |
+
scores[f"{mod}_normalised"] = scores[mod] / total
|
| 92 |
+
|
| 93 |
+
return scores
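# Usage sketch (assumes a trained encoder `model` and a dataloader `batch`):
#
#     attributor = ModalityAttributor(model, roi_indices={"A1": np.array([0, 1, 2])})
#     scores = attributor.attribute(batch)
#     scores["audio"]             # (n_vertices,) absolute ablation effect
#     scores["audio_normalised"]  # share of the summed effect at each vertex
#     scores["audio_roi"]["A1"]   # scalar ROI summary (only when roi_indices is given)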
|
src/cortexlab/inference/predictor.py
ADDED
|
@@ -0,0 +1,392 @@
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""TribeModel for inference and utilities for building event DataFrames."""
|
| 8 |
+
|
| 9 |
+
import logging
|
| 10 |
+
import typing as tp
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
import pandas as pd
|
| 15 |
+
import pydantic
|
| 16 |
+
import requests
|
| 17 |
+
import torch
|
| 18 |
+
import yaml
|
| 19 |
+
from einops import rearrange
|
| 20 |
+
from exca import ConfDict, TaskInfra
|
| 21 |
+
from tqdm import tqdm
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
logger.setLevel(logging.INFO)
|
| 25 |
+
if not logger.handlers:
|
| 26 |
+
_handler = logging.StreamHandler()
|
| 27 |
+
_handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
|
| 28 |
+
logger.addHandler(_handler)
|
| 29 |
+
from neuralset.events.transforms import (
|
| 30 |
+
AddContextToWords,
|
| 31 |
+
AddSentenceToWords,
|
| 32 |
+
AddText,
|
| 33 |
+
ChunkEvents,
|
| 34 |
+
ExtractAudioFromVideo,
|
| 35 |
+
RemoveMissing,
|
| 36 |
+
)
|
| 37 |
+
from neuralset.events.utils import standardize_events
|
| 38 |
+
|
| 39 |
+
from cortexlab.data.transforms import ExtractWordsFromAudio
|
| 40 |
+
from cortexlab.training.experiment import TribeExperiment
|
| 41 |
+
|
| 42 |
+
VALID_SUFFIXES: dict[str, set[str]] = {
|
| 43 |
+
"text_path": {".txt"},
|
| 44 |
+
"audio_path": {".wav", ".mp3", ".flac", ".ogg"},
|
| 45 |
+
"video_path": {".mp4", ".avi", ".mkv", ".mov", ".webm"},
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def download_file(url: str, path: str | Path) -> Path:
|
| 50 |
+
"""Download a file from *url* and save it to *path*.
|
| 51 |
+
|
| 52 |
+
Raises ``requests.HTTPError`` on non-2xx responses.
|
| 53 |
+
"""
|
| 54 |
+
path = Path(path)
|
| 55 |
+
path.parent.mkdir(parents=True, exist_ok=True)
|
| 56 |
+
with requests.get(url, stream=True, timeout=30) as r:
|
| 57 |
+
r.raise_for_status()
|
| 58 |
+
with open(path, "wb") as f:
|
| 59 |
+
for chunk in r.iter_content(chunk_size=128 * 1024):
|
| 60 |
+
if chunk:
|
| 61 |
+
f.write(chunk)
|
| 62 |
+
logger.info(f"Downloaded {url} -> {path}")
|
| 63 |
+
return path
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_audio_and_text_events(
|
| 67 |
+
events: pd.DataFrame, audio_only: bool = False
|
| 68 |
+
) -> pd.DataFrame:
|
| 69 |
+
"""Run the audio/video-to-text pipeline on an events DataFrame.
|
| 70 |
+
|
| 71 |
+
Extracts audio from video, chunks long clips, transcribes words, and
|
| 72 |
+
attaches sentence/context annotations. Set *audio_only* to ``True``
|
| 73 |
+
to skip the transcription and text stages.
|
| 74 |
+
"""
|
| 75 |
+
transforms = [
|
| 76 |
+
ExtractAudioFromVideo(),
|
| 77 |
+
ChunkEvents(event_type_to_chunk="Audio", max_duration=60, min_duration=30),
|
| 78 |
+
ChunkEvents(event_type_to_chunk="Video", max_duration=60, min_duration=30),
|
| 79 |
+
]
|
| 80 |
+
if not audio_only:
|
| 81 |
+
transforms.extend(
|
| 82 |
+
[
|
| 83 |
+
ExtractWordsFromAudio(),
|
| 84 |
+
AddText(),
|
| 85 |
+
AddSentenceToWords(max_unmatched_ratio=0.05),
|
| 86 |
+
AddContextToWords(
|
| 87 |
+
sentence_only=False, max_context_len=1024, split_field=""
|
| 88 |
+
),
|
| 89 |
+
RemoveMissing(),
|
| 90 |
+
]
|
| 91 |
+
)
|
| 92 |
+
events = standardize_events(events)
|
| 93 |
+
for transform in transforms:
|
| 94 |
+
events = transform(events)
|
| 95 |
+
return standardize_events(events)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class TextToEvents(pydantic.BaseModel):
|
| 99 |
+
"""Convert raw text to an events DataFrame via text-to-speech + transcription.
|
| 100 |
+
|
| 101 |
+
The text is synthesised to audio with gTTS, then processed through
|
| 102 |
+
:func:`get_audio_and_text_events` to obtain word-level events.
|
| 103 |
+
"""
|
| 104 |
+
|
| 105 |
+
text: str
|
| 106 |
+
infra: TaskInfra = TaskInfra()
|
| 107 |
+
|
| 108 |
+
def model_post_init(self, __context: tp.Any) -> None:
|
| 109 |
+
if self.infra.folder is None:
|
| 110 |
+
raise ValueError("A folder must be specified to save the audio file.")
|
| 111 |
+
|
| 112 |
+
@infra.apply()
|
| 113 |
+
def get_events(self) -> pd.DataFrame:
|
| 114 |
+
from gtts import gTTS
|
| 115 |
+
from langdetect import detect
|
| 116 |
+
|
| 117 |
+
audio_path = Path(self.infra.uid_folder(create=True)) / "audio.mp3"
|
| 118 |
+
lang = detect(self.text)
|
| 119 |
+
tts = gTTS(self.text, lang=lang)
|
| 120 |
+
tts.save(str(audio_path))
|
| 121 |
+
logger.info(f"Wrote TTS audio to {audio_path}")
|
| 122 |
+
|
| 123 |
+
audio_event = {
|
| 124 |
+
"type": "Audio",
|
| 125 |
+
"filepath": str(audio_path),
|
| 126 |
+
"start": 0,
|
| 127 |
+
"timeline": "default",
|
| 128 |
+
"subject": "default",
|
| 129 |
+
}
|
| 130 |
+
return get_audio_and_text_events(pd.DataFrame([audio_event]))
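# Usage sketch (hypothetical cache folder): synthesise speech for a string and
# recover word-level events from the generated audio:
#
#     events = TextToEvents(
#         text="The quick brown fox jumps over the lazy dog.",
#         infra={"folder": "./cache"},
#     ).get_events()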
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class TribeModel(TribeExperiment):
|
| 134 |
+
"""High-level inference wrapper around :class:`TribeExperiment`.
|
| 135 |
+
|
| 136 |
+
Provides a simple ``from_pretrained`` / ``predict`` interface for
|
| 137 |
+
generating fMRI-like brain-activity predictions from text, audio,
|
| 138 |
+
or video inputs.
|
| 139 |
+
|
| 140 |
+
Typical usage::
|
| 141 |
+
|
| 142 |
+
model = TribeModel.from_pretrained("facebook/tribev2")
|
| 143 |
+
events = model.get_events_dataframe(video_path="clip.mp4")
|
| 144 |
+
preds, segments = model.predict(events)
|
| 145 |
+
"""
|
| 146 |
+
|
| 147 |
+
cache_folder: str = "./cache"
|
| 148 |
+
remove_empty_segments: bool = True
|
| 149 |
+
|
| 150 |
+
@classmethod
|
| 151 |
+
def from_pretrained(
|
| 152 |
+
cls,
|
| 153 |
+
checkpoint_dir: str | Path,
|
| 154 |
+
checkpoint_name: str = "best.ckpt",
|
| 155 |
+
cache_folder: str | Path | None = None,
|
| 156 |
+
cluster: str | None = None,
|
| 157 |
+
device: str = "auto",
|
| 158 |
+
config_update: dict | None = None,
|
| 159 |
+
) -> "TribeModel":
|
| 160 |
+
"""Load a trained model from a checkpoint directory or HuggingFace Hub repo.
|
| 161 |
+
|
| 162 |
+
``checkpoint_dir`` can be either a local path containing
|
| 163 |
+
``config.yaml`` and ``<checkpoint_name>``, or a HuggingFace Hub
|
| 164 |
+
repo id (e.g. ``"facebook/tribev2"``).
|
| 165 |
+
|
| 166 |
+
Parameters
|
| 167 |
+
----------
|
| 168 |
+
checkpoint_dir:
|
| 169 |
+
Local directory or HuggingFace Hub repo id that contains
|
| 170 |
+
``config.yaml`` and the checkpoint file.
|
| 171 |
+
checkpoint_name:
|
| 172 |
+
Filename of the checkpoint inside *checkpoint_dir*.
|
| 173 |
+
cache_folder:
|
| 174 |
+
Directory used to cache extracted features. Created if it
|
| 175 |
+
does not exist. Defaults to ``"./cache"`` when ``None``.
|
| 176 |
+
cluster:
|
| 177 |
+
Cluster backend forwarded to feature-extractor infra
|
| 178 |
+
(``"auto"`` by default).
|
| 179 |
+
device:
|
| 180 |
+
Torch device string. ``"auto"`` selects CUDA when available.
|
| 181 |
+
config_update:
|
| 182 |
+
Optional dictionary of config overrides applied after the
|
| 183 |
+
YAML config is loaded.
|
| 184 |
+
|
| 185 |
+
Returns
|
| 186 |
+
-------
|
| 187 |
+
TribeModel
|
| 188 |
+
A ready-to-use model instance with weights loaded in eval mode.
|
| 189 |
+
"""
|
| 190 |
+
if cache_folder is not None:
|
| 191 |
+
Path(cache_folder).mkdir(parents=True, exist_ok=True)
|
| 192 |
+
if device == "auto":
|
| 193 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 194 |
+
checkpoint_dir = Path(checkpoint_dir)
|
| 195 |
+
if checkpoint_dir.exists():
|
| 196 |
+
config_path = checkpoint_dir / "config.yaml"
|
| 197 |
+
ckpt_path = checkpoint_dir / checkpoint_name
|
| 198 |
+
else:
|
| 199 |
+
from huggingface_hub import hf_hub_download
|
| 200 |
+
|
| 201 |
+
repo_id = str(checkpoint_dir)
|
| 202 |
+
config_path = hf_hub_download(repo_id, "config.yaml")
|
| 203 |
+
ckpt_path = hf_hub_download(repo_id, checkpoint_name)
|
| 204 |
+
with open(config_path, "r") as f:
|
| 205 |
+
config = ConfDict(yaml.load(f, Loader=yaml.UnsafeLoader))
|
| 206 |
+
for modality in ["text", "audio", "video"]:
|
| 207 |
+
config[f"data.{modality}_feature.infra.folder"] = cache_folder
|
| 208 |
+
config[f"data.{modality}_feature.infra.cluster"] = cluster
|
| 209 |
+
|
| 210 |
+
for param in [
|
| 211 |
+
"infra.workdir",
|
| 212 |
+
"data.study.infra_timelines",
|
| 213 |
+
"data.neuro.infra",
|
| 214 |
+
"data.image_feature.infra",
|
| 215 |
+
]:
|
| 216 |
+
config.pop(param)
|
| 217 |
+
config["data.study.path"] = "."
|
| 218 |
+
config["average_subjects"] = True
|
| 219 |
+
config["checkpoint_path"] = str(config["infra.folder"]) + f"/{checkpoint_name}"
|
| 220 |
+
config["cache_folder"] = (
|
| 221 |
+
str(cache_folder) if cache_folder is not None else "./cache"
|
| 222 |
+
)
|
| 223 |
+
if config_update is not None:
|
| 224 |
+
config.update(config_update)
|
| 225 |
+
xp = cls(**config)
|
| 226 |
+
|
| 227 |
+
logger.info(f"Loading model from {ckpt_path}")
|
| 228 |
+
ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=True, mmap=True)
|
| 229 |
+
build_args = ckpt["model_build_args"]
|
| 230 |
+
state_dict = {
|
| 231 |
+
k.removeprefix("model."): v for k, v in ckpt["state_dict"].items()
|
| 232 |
+
}
|
| 233 |
+
del ckpt
|
| 234 |
+
|
| 235 |
+
model = xp.brain_model_config.build(**build_args)
|
| 236 |
+
model.load_state_dict(state_dict, strict=True, assign=True)
|
| 237 |
+
del state_dict
|
| 238 |
+
model.to(device)
|
| 239 |
+
model.eval()
|
| 240 |
+
xp._model = model
|
| 241 |
+
return xp
|
| 242 |
+
|
| 243 |
+
def get_events_dataframe(
|
| 244 |
+
self,
|
| 245 |
+
text_path: str | None = None,
|
| 246 |
+
audio_path: str | None = None,
|
| 247 |
+
video_path: str | None = None,
|
| 248 |
+
) -> pd.DataFrame:
|
| 249 |
+
"""Build an events DataFrame from exactly one input source.
|
| 250 |
+
|
| 251 |
+
Parameters
|
| 252 |
+
----------
|
| 253 |
+
text_path:
|
| 254 |
+
Path to a ``.txt`` file. The text is converted to speech, then
|
| 255 |
+
transcribed back to produce word-level events.
|
| 256 |
+
audio_path:
|
| 257 |
+
Path to an audio file (``.wav``, ``.mp3``, ``.flac``, ``.ogg``).
|
| 258 |
+
video_path:
|
| 259 |
+
Path to a video file (``.mp4``, ``.avi``, ``.mkv``, ``.mov``,
|
| 260 |
+
``.webm``).
|
| 261 |
+
|
| 262 |
+
Returns
|
| 263 |
+
-------
|
| 264 |
+
pd.DataFrame
|
| 265 |
+
Standardised events DataFrame with columns such as ``type``,
|
| 266 |
+
``filepath``, ``start``, ``duration``, ``timeline``, and
|
| 267 |
+
``subject``.
|
| 268 |
+
|
| 269 |
+
Raises
|
| 270 |
+
------
|
| 271 |
+
ValueError
|
| 272 |
+
If zero or more than one path is provided, or if the file
|
| 273 |
+
extension does not match the expected suffixes.
|
| 274 |
+
FileNotFoundError
|
| 275 |
+
If the specified file does not exist.
|
| 276 |
+
"""
|
| 277 |
+
provided = {
|
| 278 |
+
name: value
|
| 279 |
+
for name, value in [
|
| 280 |
+
("text_path", text_path),
|
| 281 |
+
("audio_path", audio_path),
|
| 282 |
+
("video_path", video_path),
|
| 283 |
+
]
|
| 284 |
+
if value is not None
|
| 285 |
+
}
|
| 286 |
+
if len(provided) != 1:
|
| 287 |
+
raise ValueError(
|
| 288 |
+
f"Exactly one of text_path, audio_path, video_path must be "
|
| 289 |
+
f"provided, got: {list(provided.keys()) or 'none'}"
|
| 290 |
+
)
|
| 291 |
+
|
| 292 |
+
name, value = next(iter(provided.items()))
|
| 293 |
+
path = Path(value)
|
| 294 |
+
suffix = path.suffix.lower()
|
| 295 |
+
if suffix not in VALID_SUFFIXES[name]:
|
| 296 |
+
raise ValueError(
|
| 297 |
+
f"{name} must end with one of {sorted(VALID_SUFFIXES[name])}, "
|
| 298 |
+
f"got '{suffix}'"
|
| 299 |
+
)
|
| 300 |
+
if not path.is_file():
|
| 301 |
+
raise FileNotFoundError(f"{name} does not exist: {path}")
|
| 302 |
+
|
| 303 |
+
if text_path is not None:
|
| 304 |
+
text = path.read_text(encoding="utf-8")
|
| 305 |
+
if not text.strip():
|
| 306 |
+
raise ValueError(f"Text file is empty: {path}")
|
| 307 |
+
return TextToEvents(
|
| 308 |
+
text=text,
|
| 309 |
+
infra={"folder": self.cache_folder, "mode": "retry"},
|
| 310 |
+
).get_events()
|
| 311 |
+
|
| 312 |
+
event_type = "Audio" if audio_path is not None else "Video"
|
| 313 |
+
event = {
|
| 314 |
+
"type": event_type,
|
| 315 |
+
"filepath": str(path),
|
| 316 |
+
"start": 0,
|
| 317 |
+
"timeline": "default",
|
| 318 |
+
"subject": "default",
|
| 319 |
+
}
|
| 320 |
+
return get_audio_and_text_events(pd.DataFrame([event]))
|
| 321 |
+
|
| 322 |
+
def predict(
|
| 323 |
+
self, events: pd.DataFrame, verbose: bool = True
|
| 324 |
+
) -> tuple[np.ndarray, list]:
|
| 325 |
+
"""Run inference on an events DataFrame and return per-TR predictions.
|
| 326 |
+
|
| 327 |
+
Each batch is split into segments of length ``data.TR``. When
|
| 328 |
+
``remove_empty_segments`` is ``True`` (the default), segments that
|
| 329 |
+
contain no events are discarded.
|
| 330 |
+
|
| 331 |
+
Parameters
|
| 332 |
+
----------
|
| 333 |
+
events:
|
| 334 |
+
Events DataFrame, typically produced by
|
| 335 |
+
:meth:`get_events_dataframe`.
|
| 336 |
+
verbose:
|
| 337 |
+
If ``True`` (default), display a ``tqdm`` progress bar.
|
| 338 |
+
|
| 339 |
+
Returns
|
| 340 |
+
-------
|
| 341 |
+
preds : np.ndarray
|
| 342 |
+
Array of shape ``(n_kept_segments, n_vertices)`` with the
|
| 343 |
+
predicted brain activity.
|
| 344 |
+
all_segments : list
|
| 345 |
+
Corresponding segment objects aligned with *preds*.
|
| 346 |
+
|
| 347 |
+
Raises
|
| 348 |
+
------
|
| 349 |
+
RuntimeError
|
| 350 |
+
If the model has not been loaded via :meth:`from_pretrained`.
|
| 351 |
+
"""
|
| 352 |
+
if self._model is None:
|
| 353 |
+
raise RuntimeError(
|
| 354 |
+
"TribeModel must be instantiated via the .from_pretrained method"
|
| 355 |
+
)
|
| 356 |
+
model = self._model
|
| 357 |
+
loader = self.data.get_loaders(events=events, split_to_build="all")["all"]
|
| 358 |
+
|
| 359 |
+
preds, all_segments = [], []
|
| 360 |
+
n_samples, n_kept = 0, 0
|
| 361 |
+
with torch.inference_mode():
|
| 362 |
+
for batch in tqdm(loader, disable=not verbose):
|
| 363 |
+
batch = batch.to(model.device)
|
| 364 |
+
batch_segments = []
|
| 365 |
+
for segment in batch.segments:
|
| 366 |
+
for t in np.arange(0, segment.duration - 1e-2, self.data.TR):
|
| 367 |
+
batch_segments.append(
|
| 368 |
+
segment.copy(offset=t, duration=self.data.TR)
|
| 369 |
+
)
|
| 370 |
+
if self.remove_empty_segments:
|
| 371 |
+
keep = np.array([len(s.ns_events) > 0 for s in batch_segments])
|
| 372 |
+
else:
|
| 373 |
+
keep = np.ones(len(batch_segments), dtype=bool)
|
| 374 |
+
n_kept += keep.sum()
|
| 375 |
+
n_samples += len(batch_segments)
|
| 376 |
+
batch_segments = [s for i, s in enumerate(batch_segments) if keep[i]]
|
| 377 |
+
y_pred = model(batch).detach().cpu().numpy()
|
| 378 |
+
y_pred = rearrange(y_pred, "b d t -> (b t) d")[keep]
|
| 379 |
+
preds.append(y_pred)
|
| 380 |
+
all_segments.extend(batch_segments)
|
| 381 |
+
preds = np.concatenate(preds)
|
| 382 |
+
if len(all_segments) != preds.shape[0]:
|
| 383 |
+
raise ValueError(
|
| 384 |
+
f"Number of samples: {preds.shape[0]} != {len(all_segments)}"
|
| 385 |
+
)
|
| 386 |
+
logger.info(
|
| 387 |
+
"Predicted %d / %d segments (%.1f%% kept)",
|
| 388 |
+
n_kept,
|
| 389 |
+
n_samples,
|
| 390 |
+
100.0 * n_kept / max(n_samples, 1),
|
| 391 |
+
)
|
| 392 |
+
return preds, all_segments
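# Usage sketch: predictions are aligned with the returned segments, one row per
# kept TR-length window, so stacking them in order gives a (time, vertices)
# trajectory for the input stimulus:
#
#     preds, segments = model.predict(events)
#     preds.shape                   # (n_kept_segments, n_vertices)
#     [s.offset for s in segments]  # window start times (assuming Segment exposes .offset)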
|
src/cortexlab/inference/streaming.py
ADDED
|
@@ -0,0 +1,158 @@
|
"""Real-time sliding-window fMRI prediction from live feature streams.

:class:`StreamingPredictor` buffers pre-extracted feature tensors one TR
at a time and emits cortical-surface predictions once the context window
is full. Designed for BCI pipelines where features arrive continuously
from upstream extractor models.
"""

from __future__ import annotations

import logging
import threading
from collections import deque

import numpy as np
import torch
from neuralset.dataloader import SegmentData

logger = logging.getLogger(__name__)


class StreamingPredictor:
    """Sliding-window predictor for real-time fMRI inference.

    Operates at the *feature level* -- the caller must provide
    pre-extracted tensors from running extractor models (e.g. Wav2Vec2,
    V-JEPA2, LLaMA).

    Parameters
    ----------
    model : torch.nn.Module
        A :class:`FmriEncoderModel` instance in eval mode.
    window_trs : int
        Number of TRs that form the context window.
    step_trs : int
        Emit a prediction every *step_trs* frames.
    tr_seconds : float
        Duration of one TR in seconds.
    modalities : list[str]
        Expected modality keys (e.g. ``["text", "audio", "video"]``).
    device : str or torch.device
        Device for inference.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        window_trs: int = 40,
        step_trs: int = 1,
        tr_seconds: float = 1.0,
        modalities: list[str] | None = None,
        device: str | torch.device = "cpu",
    ):
        self.model = model
        self.window_trs = window_trs
        self.step_trs = step_trs
        self.tr_seconds = tr_seconds
        self.modalities = modalities or list(model.feature_dims.keys())
        self.device = torch.device(device)
        self._buffer: deque[dict[str, torch.Tensor]] = deque(maxlen=window_trs)
        self._frames_since_emit = 0
        self._lock = threading.Lock()

    @classmethod
    def from_cortexlab_model(
        cls,
        cortexlab_model,
        window_trs: int = 40,
        step_trs: int = 1,
        tr_seconds: float = 1.0,
        device: str = "cuda",
    ) -> StreamingPredictor:
        """Create from a loaded CortexLab/TribeModel inference wrapper."""
        return cls(
            model=cortexlab_model._model,
            window_trs=window_trs,
            step_trs=step_trs,
            tr_seconds=tr_seconds,
            device=device,
        )

    def push_frame(
        self, features: dict[str, torch.Tensor]
    ) -> np.ndarray | None:
        """Push one TR's worth of features and maybe get a prediction.

        Parameters
        ----------
        features : dict[str, torch.Tensor]
            Mapping from modality name to feature tensor. Each tensor
            should have shape ``(n_layers, D)`` or ``(D,)``.

        Returns
        -------
        np.ndarray or None
            Prediction of shape ``(n_vertices,)`` if a prediction was
            emitted, otherwise ``None``.
        """
        with self._lock:
            normalised: dict[str, torch.Tensor] = {}
            for mod in self.modalities:
                t = features.get(mod)
                if t is None:
                    # Zero-fill missing modality
                    dims = self.model.feature_dims.get(mod)
                    if dims is not None:
                        num_layers, feat_dim = dims
                        t = torch.zeros(num_layers, feat_dim)
                    else:
                        continue
                if t.ndim == 1:
                    t = t.unsqueeze(0)  # (D,) -> (1, D)
                normalised[mod] = t
            self._buffer.append(normalised)
            self._frames_since_emit += 1

            if len(self._buffer) < self.window_trs:
                return None
            if self._frames_since_emit < self.step_trs:
                return None

            self._frames_since_emit = 0
            return self._predict()

    def _predict(self) -> np.ndarray:
        """Run inference on the current buffer contents."""
        batch_data: dict[str, torch.Tensor] = {}
        for mod in self.modalities:
            frames = []
            for frame in self._buffer:
                if mod in frame:
                    frames.append(frame[mod])
            if frames:
                # Stack: (T, L, D) -> (L, D, T) -> (1, L, D, T)
                stacked = torch.stack(frames, dim=0)  # (T, L, D)
                stacked = stacked.permute(1, 2, 0).unsqueeze(0)  # (1, L, D, T)
                batch_data[mod] = stacked.to(self.device)

        import neuralset.segments as seg

        dummy_segments = [
            seg.Segment(
                start=0.0,
                duration=float(self.window_trs * self.tr_seconds),
                timeline="stream",
            )
        ]
        batch = SegmentData(data=batch_data, segments=dummy_segments)
        with torch.inference_mode():
            pred = self.model(batch, pool_outputs=True)  # (1, V, T')
        return pred[0, :, -1].cpu().numpy()  # (V,) -- last timestep

    def flush(self) -> list[np.ndarray]:
        """Force-emit predictions for any remaining buffered frames."""
        with self._lock:
            results = []
            if len(self._buffer) >= self.window_trs:
                results.append(self._predict())
            return results

    def reset(self) -> None:
        """Clear the buffer and reset the step counter."""
        with self._lock:
            self._buffer.clear()
            self._frames_since_emit = 0
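A minimal usage sketch for `StreamingPredictor` as added above. The stub encoder, vertex count (1000), TR value, and feature shapes below are placeholders for illustration, not values from the real `FmriEncoderModel`:

```python
import torch
from torch import nn

from cortexlab.inference.streaming import StreamingPredictor


class _StubEncoder(nn.Module):
    """Stand-in for FmriEncoderModel: maps a SegmentData batch to (1, V, T)."""

    feature_dims = {"text": (3, 2048), "audio": (3, 1024)}  # hypothetical dims

    def forward(self, batch, pool_outputs=True):
        n_trs = next(iter(batch.data.values())).shape[-1]
        return torch.zeros(1, 1000, n_trs)  # (batch, n_vertices, n_trs)


predictor = StreamingPredictor(
    model=_StubEncoder().eval(),
    window_trs=40,
    step_trs=1,
    tr_seconds=1.49,  # illustrative TR, not a project default
    device="cpu",
)

for tr in range(50):
    # One TR of pre-extracted text features; "audio" is zero-filled by push_frame.
    pred = predictor.push_frame({"text": torch.randn(3, 2048)})
    if pred is not None:
        print(tr, pred.shape)  # (1000,) once the 40-TR window is full

predictor.reset()  # clear the buffer between timelines
```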
src/cortexlab/training/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
from cortexlab.training.pl_module import BrainModule

__all__ = ["BrainModule"]
src/cortexlab/training/experiment.py
ADDED
|
@@ -0,0 +1,651 @@
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""Defines the main classes used in the experiment.
|
| 8 |
+
|
| 9 |
+
We suggest the following structure:
|
| 10 |
+
- `Data`: configures dataset and extractors to return DataLoaders
|
| 11 |
+
- `Trainer`: creates the deep learning model and exposes `fit` and `test` methods
|
| 12 |
+
- `TribeExperiment`: main class that defines the experiment to run by using `Data` and `Trainer`
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import gc
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
import typing as tp
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
|
| 21 |
+
import neuralset as ns
|
| 22 |
+
import numpy as np
|
| 23 |
+
import pandas as pd
|
| 24 |
+
import pydantic
|
| 25 |
+
import torch
|
| 26 |
+
import yaml
|
| 27 |
+
from exca import ConfDict, TaskInfra
|
| 28 |
+
from neuralset.events.etypes import EventTypesHelper
|
| 29 |
+
from neuralset.events.utils import standardize_events
|
| 30 |
+
from neuraltrain.losses import BaseLoss
|
| 31 |
+
from neuraltrain.metrics import BaseMetric
|
| 32 |
+
from neuraltrain.models import BaseModelConfig
|
| 33 |
+
from neuraltrain.models.common import SubjectLayers
|
| 34 |
+
from neuraltrain.optimizers.base import BaseOptimizer
|
| 35 |
+
from neuraltrain.utils import BaseExperiment, WandbLoggerConfig
|
| 36 |
+
from torch import nn
|
| 37 |
+
from torch.utils.data import DataLoader
|
| 38 |
+
|
| 39 |
+
from cortexlab.data.transforms import * # register custom events transforms in neuralset
|
| 40 |
+
from cortexlab.core.model import * # register custom models in neuraltrain
|
| 41 |
+
from cortexlab.data.studies import * # register studies
|
| 42 |
+
from cortexlab.data.loader import (
|
| 43 |
+
MultiStudyLoader,
|
| 44 |
+
set_study_in_average_subject_mode,
|
| 45 |
+
split_segments_by_time,
|
| 46 |
+
)
|
| 47 |
+
from cortexlab.data.fmri import * # register TribeSurfaceProjector
|
| 48 |
+
|
| 49 |
+
# Configure logger
|
| 50 |
+
LOGGER = logging.getLogger(__name__)
|
| 51 |
+
_handler = logging.StreamHandler()
|
| 52 |
+
_formatter = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s", "%H:%M:%S")
|
| 53 |
+
_handler.setFormatter(_formatter)
|
| 54 |
+
if not LOGGER.handlers:
|
| 55 |
+
LOGGER.addHandler(_handler)
|
| 56 |
+
LOGGER.setLevel(logging.INFO)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _free_extractor_model(extractor: ns.extractors.BaseExtractor) -> None:
|
| 60 |
+
"""Delete cached GPU model from an extractor after its features are cached.
|
| 61 |
+
|
| 62 |
+
Extractors lazily load models onto GPU during ``prepare`` and keep them
|
| 63 |
+
in ``_model``. Since results are persisted to disk, the model is no
|
| 64 |
+
longer needed afterwards and this frees VRAM for subsequent extractors.
|
| 65 |
+
"""
|
| 66 |
+
targets = [extractor]
|
| 67 |
+
if hasattr(extractor, "image"):
|
| 68 |
+
targets.append(extractor.image)
|
| 69 |
+
for target in targets:
|
| 70 |
+
for attr in ("_model",):
|
| 71 |
+
obj = getattr(target, attr, None)
|
| 72 |
+
if isinstance(obj, torch.nn.Module):
|
| 73 |
+
try:
|
| 74 |
+
delattr(target, attr)
|
| 75 |
+
except Exception:
|
| 76 |
+
pass
|
| 77 |
+
gc.collect()
|
| 78 |
+
if torch.cuda.is_available():
|
| 79 |
+
torch.cuda.empty_cache()
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class Data(pydantic.BaseModel):
|
| 83 |
+
"""Handles configuration and creation of DataLoaders from dataset and extractors."""
|
| 84 |
+
|
| 85 |
+
model_config = pydantic.ConfigDict(extra="forbid")
|
| 86 |
+
|
| 87 |
+
study: MultiStudyLoader
|
| 88 |
+
# features
|
| 89 |
+
neuro: ns.extractors.BaseExtractor
|
| 90 |
+
text_feature: ns.extractors.BaseExtractor | None = None
|
| 91 |
+
image_feature: ns.extractors.BaseExtractor | None = None
|
| 92 |
+
audio_feature: ns.extractors.BaseExtractor | None = None
|
| 93 |
+
video_feature: ns.extractors.BaseExtractor | None = None
|
| 94 |
+
subject_id: ns.extractors.LabelEncoder = ns.extractors.LabelEncoder(
|
| 95 |
+
event_field="subject", allow_missing=True, aggregation="first"
|
| 96 |
+
)
|
| 97 |
+
frequency: float | None = None
|
| 98 |
+
features_to_use: list[
|
| 99 |
+
tp.Literal["text", "audio", "video", "image", "context", "flow", "music"]
|
| 100 |
+
]
|
| 101 |
+
features_to_mask: list[
|
| 102 |
+
tp.Literal["text", "audio", "video", "image", "context", "flow", "music"]
|
| 103 |
+
] = []
|
| 104 |
+
n_layers_to_use: int | None = None
|
| 105 |
+
layers_to_use: list[float] | None = None
|
| 106 |
+
layer_aggregation: tp.Literal["group_mean", "mean"] | None = "group_mean"
|
| 107 |
+
# Dataset
|
| 108 |
+
duration_trs: int = 40
|
| 109 |
+
overlap_trs_train: int = 0
|
| 110 |
+
overlap_trs_val: int | None = None
|
| 111 |
+
batch_size: int = 64
|
| 112 |
+
num_workers: int | None = None
|
| 113 |
+
shuffle_train: bool = True
|
| 114 |
+
shuffle_val: bool = False
|
| 115 |
+
stride_drop_incomplete: bool = False
|
| 116 |
+
split_segments_by_time: bool = False
|
| 117 |
+
|
| 118 |
+
def model_post_init(self, __context):
|
| 119 |
+
super().model_post_init(__context)
|
| 120 |
+
layers_to_use = None
|
| 121 |
+
if self.n_layers_to_use is not None or self.layers_to_use is not None:
|
| 122 |
+
assert not (
|
| 123 |
+
self.n_layers_to_use is not None and self.layers_to_use is not None
|
| 124 |
+
), "Only one of n_layers_to_use or layers_to_use can be specified"
|
| 125 |
+
if self.n_layers_to_use is not None:
|
| 126 |
+
layers_to_use = np.linspace(0, 1, self.n_layers_to_use).tolist()
|
| 127 |
+
else:
|
| 128 |
+
layers_to_use = self.layers_to_use
|
| 129 |
+
for modality in self.features_to_use:
|
| 130 |
+
extractor = getattr(self, f"{modality}_feature")
|
| 131 |
+
if hasattr(extractor, "layers"):
|
| 132 |
+
setattr(extractor, "layer_aggregation", self.layer_aggregation)
|
| 133 |
+
if layers_to_use is not None:
|
| 134 |
+
setattr(extractor, "layers", layers_to_use)
|
| 135 |
+
if hasattr(extractor, "image") and hasattr(extractor.image, "layers"):
|
| 136 |
+
setattr(extractor.image, "layer_aggregation", self.layer_aggregation)
|
| 137 |
+
if layers_to_use is not None:
|
| 138 |
+
setattr(extractor.image, "layers", layers_to_use)
|
| 139 |
+
if self.frequency is not None:
|
| 140 |
+
for modality in self.features_to_use:
|
| 141 |
+
extractor = getattr(self, f"{modality}_feature")
|
| 142 |
+
if hasattr(extractor, "frequency"):
|
| 143 |
+
setattr(extractor, "frequency", self.frequency)
|
| 144 |
+
|
| 145 |
+
@property
|
| 146 |
+
def TR(self) -> float:
|
| 147 |
+
return 1 / self.neuro.frequency
|
| 148 |
+
|
| 149 |
+
def get_events(self) -> pd.DataFrame:
|
| 150 |
+
events = self.study.run()
|
| 151 |
+
events = events[events.type != "Sentence"]
|
| 152 |
+
|
| 153 |
+
cols = ["index", "subject", "timeline"]
|
| 154 |
+
event_summary = (
|
| 155 |
+
events.reset_index().groupby(["study", "split", "type"])[cols].nunique()
|
| 156 |
+
)
|
| 157 |
+
LOGGER.info("Event summary: \n%s", event_summary)
|
| 158 |
+
return events
|
| 159 |
+
|
| 160 |
+
def get_loaders(
|
| 161 |
+
self,
|
| 162 |
+
events: pd.DataFrame | None = None,
|
| 163 |
+
split_to_build: tp.Literal["train", "val", "all"] | None = None,
|
| 164 |
+
) -> tuple[dict[str, DataLoader], int]:
|
| 165 |
+
|
| 166 |
+
if events is None:
|
| 167 |
+
events = self.get_events()
|
| 168 |
+
else:
|
| 169 |
+
events = standardize_events(events)
|
| 170 |
+
|
| 171 |
+
extractors = {}
|
| 172 |
+
for modality in self.features_to_use:
|
| 173 |
+
extractors[modality] = getattr(self, f"{modality}_feature")
|
| 174 |
+
if "Fmri" in events.type.unique():
|
| 175 |
+
extractors["fmri"] = self.neuro
|
| 176 |
+
dummy_events = []
|
| 177 |
+
for timeline_name, timeline in events.groupby("timeline"):
|
| 178 |
+
if "split" in timeline.columns:
|
| 179 |
+
splits = timeline.split.dropna().unique()
|
| 180 |
+
assert (
|
| 181 |
+
len(splits) == 1
|
| 182 |
+
), f"Timeline {timeline_name} has multiple splits: {splits}"
|
| 183 |
+
split = splits[0]
|
| 184 |
+
else:
|
| 185 |
+
split = "all"
|
| 186 |
+
dummy_event = {
|
| 187 |
+
"type": "CategoricalEvent",
|
| 188 |
+
"timeline": timeline_name,
|
| 189 |
+
"start": timeline.start.min(),
|
| 190 |
+
"duration": timeline.stop.max() - timeline.start.min(),
|
| 191 |
+
"split": split,
|
| 192 |
+
"subject": timeline.subject.unique()[0],
|
| 193 |
+
}
|
| 194 |
+
dummy_events.append(dummy_event)
|
| 195 |
+
events = pd.concat([events, pd.DataFrame(dummy_events)])
|
| 196 |
+
events = standardize_events(events)
|
| 197 |
+
|
| 198 |
+
extractors["subject_id"] = self.subject_id
|
| 199 |
+
|
| 200 |
+
features_to_remove = set()
|
| 201 |
+
for extractor_name, extractor in extractors.items():
|
| 202 |
+
event_types = EventTypesHelper(extractor.event_types).names
|
| 203 |
+
if not any(
|
| 204 |
+
[event_type in events.type.unique() for event_type in event_types]
|
| 205 |
+
):
|
| 206 |
+
features_to_remove.add(extractor_name)
|
| 207 |
+
for extractor_name in features_to_remove:
|
| 208 |
+
del extractors[extractor_name]
|
| 209 |
+
LOGGER.warning(
|
| 210 |
+
"Removing extractor %s as there are no corresponding events",
|
| 211 |
+
extractor_name,
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
for name, extractor in extractors.items():
|
| 215 |
+
LOGGER.info("Preparing extractor: %s", name)
|
| 216 |
+
extractor.prepare(events)
|
| 217 |
+
_free_extractor_model(extractor)
|
| 218 |
+
|
| 219 |
+
# Prepare dataloaders
|
| 220 |
+
loaders = {}
|
| 221 |
+
if split_to_build is None:
|
| 222 |
+
splits = ["train", "val"]
|
| 223 |
+
else:
|
| 224 |
+
splits = [split_to_build]
|
| 225 |
+
for split in splits:
|
| 226 |
+
LOGGER.info("Building dataloader for split %s", split)
|
| 227 |
+
if split == "all" or self.split_segments_by_time:
|
| 228 |
+
split_sel = [True] * len(events)
|
| 229 |
+
shuffle = False
|
| 230 |
+
overlap_trs = self.overlap_trs_train
|
| 231 |
+
else:
|
| 232 |
+
split_sel = events.split == split
|
| 233 |
+
if split not in events.split.unique():
|
| 234 |
+
shuffle = False
|
| 235 |
+
else:
|
| 236 |
+
shuffle = (
|
| 237 |
+
self.shuffle_train if split == "train" else self.shuffle_val
|
| 238 |
+
)
|
| 239 |
+
if split == "val":
|
| 240 |
+
overlap_trs = self.overlap_trs_val or self.overlap_trs_train
|
| 241 |
+
else:
|
| 242 |
+
overlap_trs = self.overlap_trs_train
|
| 243 |
+
|
| 244 |
+
sel = np.array(split_sel)
|
| 245 |
+
segments = ns.segments.list_segments(
|
| 246 |
+
events[sel],
|
| 247 |
+
triggers=events[sel].type == "CategoricalEvent",
|
| 248 |
+
stride=(self.duration_trs - overlap_trs) * self.TR,
|
| 249 |
+
duration=self.duration_trs * self.TR,
|
| 250 |
+
stride_drop_incomplete=self.stride_drop_incomplete,
|
| 251 |
+
)
|
| 252 |
+
if self.split_segments_by_time:
|
| 253 |
+
LOGGER.info(f"Total number of segments: {len(segments)}")
|
| 254 |
+
segments = split_segments_by_time(
|
| 255 |
+
segments,
|
| 256 |
+
val_ratio=self.study.transforms["split"].val_ratio,
|
| 257 |
+
split=split,
|
| 258 |
+
)
|
| 259 |
+
LOGGER.info(f"# {split} segments: {len(segments)}")
|
| 260 |
+
if len(segments) == 0:
|
| 261 |
+
LOGGER.warning("No events found for split %s", split)
|
| 262 |
+
continue
|
| 263 |
+
dataset = ns.dataloader.SegmentDataset(
|
| 264 |
+
extractors=extractors,
|
| 265 |
+
segments=segments,
|
| 266 |
+
remove_incomplete_segments=False,
|
| 267 |
+
)
|
| 268 |
+
dataloader = dataset.build_dataloader(
|
| 269 |
+
shuffle=shuffle,
|
| 270 |
+
num_workers=self.num_workers,
|
| 271 |
+
batch_size=self.batch_size,
|
| 272 |
+
)
|
| 273 |
+
loaders[split] = dataloader
|
| 274 |
+
|
| 275 |
+
return loaders
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class TribeExperiment(BaseExperiment):
|
| 279 |
+
"""Defines the main experiment pipeline including data loading and training/evaluation."""
|
| 280 |
+
|
| 281 |
+
model_config = pydantic.ConfigDict(extra="forbid")
|
| 282 |
+
|
| 283 |
+
data: Data
|
| 284 |
+
# Reproducibility
|
| 285 |
+
seed: int | None = 33
|
| 286 |
+
# Model
|
| 287 |
+
brain_model_config: BaseModelConfig
|
| 288 |
+
# Loss
|
| 289 |
+
loss: BaseLoss
|
| 290 |
+
# Optimization
|
| 291 |
+
optim: BaseOptimizer
|
| 292 |
+
# Metrics
|
| 293 |
+
metrics: list[BaseMetric]
|
| 294 |
+
monitor: str = "val/pearson"
|
| 295 |
+
# Weights & Biases
|
| 296 |
+
wandb_config: WandbLoggerConfig | None = None
|
| 297 |
+
# Hardware
|
| 298 |
+
accelerator: str = "gpu"
|
| 299 |
+
# Optim
|
| 300 |
+
n_epochs: int | None = 10
|
| 301 |
+
max_steps: int = -1
|
| 302 |
+
patience: int | None = None
|
| 303 |
+
limit_train_batches: int | None = None
|
| 304 |
+
accumulate_grad_batches: int = 1
|
| 305 |
+
# Others
|
| 306 |
+
enable_progress_bar: bool = True
|
| 307 |
+
log_every_n_steps: int | None = None
|
| 308 |
+
fast_dev_run: bool = False
|
| 309 |
+
save_checkpoints: bool = True
|
| 310 |
+
checkpoint_filename: str = "best"
|
| 311 |
+
resize_subject_layer: bool = False
|
| 312 |
+
freeze_backbone: bool = False
|
| 313 |
+
# Eval
|
| 314 |
+
average_subjects: bool = False
|
| 315 |
+
checkpoint_path: str | None = None
|
| 316 |
+
load_checkpoint: bool = True
|
| 317 |
+
test_only: bool = False
|
| 318 |
+
|
| 319 |
+
# Internal properties
|
| 320 |
+
_trainer: tp.Any = None
|
| 321 |
+
_model: tp.Any = None
|
| 322 |
+
_logger: tp.Any = None
|
| 323 |
+
|
| 324 |
+
# Others
|
| 325 |
+
infra: TaskInfra = TaskInfra(version="1")
|
| 326 |
+
|
| 327 |
+
def model_post_init(self, __context: tp.Any) -> None:
|
| 328 |
+
super().model_post_init(__context)
|
| 329 |
+
if self.infra.folder is None:
|
| 330 |
+
msg = "infra.folder needs to be specified to save the results."
|
| 331 |
+
raise ValueError(msg)
|
| 332 |
+
# Update Trainer parameters based on infra
|
| 333 |
+
self.infra.tasks_per_node = self.infra.gpus_per_node
|
| 334 |
+
self.infra.slurm_use_srun = True if self.infra.gpus_per_node > 1 else False
|
| 335 |
+
if self.infra.gpus_per_node > 1:
|
| 336 |
+
self.metrics = [m for m in self.metrics if m.name not in ["TopkAcc"]]
|
| 337 |
+
self.data.batch_size = self.data.batch_size // self.infra.gpus_per_node
|
| 338 |
+
if self.accumulate_grad_batches > 1:
|
| 339 |
+
self.data.batch_size = self.data.batch_size // self.accumulate_grad_batches
|
| 340 |
+
|
| 341 |
+
if (
|
| 342 |
+
not (self.checkpoint_path and self.load_checkpoint)
|
| 343 |
+
) or self.resize_subject_layer:
|
| 344 |
+
study_summary = self.data.study.study_summary()
|
| 345 |
+
self.data.subject_id.predefined_mapping = {
|
| 346 |
+
subject: i for i, subject in enumerate(study_summary.subject.unique())
|
| 347 |
+
}
|
| 348 |
+
self.brain_model_config.subject_layers.n_subjects = (
|
| 349 |
+
study_summary.subject.nunique()
|
| 350 |
+
)
|
| 351 |
+
if isinstance(self.brain_model_config.projector, SubjectLayers):
|
| 352 |
+
self.brain_model_config.projector.n_subjects = (
|
| 353 |
+
study_summary.subject.nunique()
|
| 354 |
+
)
|
| 355 |
+
|
| 356 |
+
if self.average_subjects:
|
| 357 |
+
study_name = self.data.study.names
|
| 358 |
+
self.brain_model_config.subject_layers.average_subjects = True
|
| 359 |
+
self.brain_model_config.subject_layers.n_subjects = 0
|
| 360 |
+
if isinstance(self.brain_model_config.projector, SubjectLayers):
|
| 361 |
+
self.brain_model_config.projector.average_subjects = True
|
| 362 |
+
self.data.neuro.aggregation = "mean"
|
| 363 |
+
self.data.subject_id.predefined_mapping = None
|
| 364 |
+
if isinstance(study_name, str):
|
| 365 |
+
LOGGER.debug(f"Setting study {study_name} in average subject mode")
|
| 366 |
+
trigger_type = (
|
| 367 |
+
"Video" if study_name in ["Wen2017", "Allen2022Bold"] else "Audio"
|
| 368 |
+
)
|
| 369 |
+
self.data.study = set_study_in_average_subject_mode(
|
| 370 |
+
self.data.study, trigger_type=trigger_type, trigger_field="filepath"
|
| 371 |
+
)
|
| 372 |
+
else:
|
| 373 |
+
pass
|
| 374 |
+
# LOGGER.warning(
|
| 375 |
+
# "Cannot set study in average subject mode with multiple studies"
|
| 376 |
+
# )
|
| 377 |
+
|
| 378 |
+
def _get_checkpoint_path(self) -> Path | None:
|
| 379 |
+
if self.checkpoint_path:
|
| 380 |
+
assert Path(
|
| 381 |
+
self.checkpoint_path
|
| 382 |
+
).exists(), f"Checkpoint path {self.checkpoint_path} does not exist."
|
| 383 |
+
checkpoint_path = Path(self.checkpoint_path)
|
| 384 |
+
else:
|
| 385 |
+
checkpoint_path = Path(self.infra.folder) / "last.ckpt"
|
| 386 |
+
if not checkpoint_path.exists():
|
| 387 |
+
checkpoint_path = None
|
| 388 |
+
return checkpoint_path
|
| 389 |
+
|
| 390 |
+
def _init_module(self, model: nn.Module) -> tp.Any:
|
| 391 |
+
from cortexlab.training.pl_module import BrainModule
|
| 392 |
+
|
| 393 |
+
checkpoint_path = self._get_checkpoint_path()
|
| 394 |
+
if (
|
| 395 |
+
self.load_checkpoint
|
| 396 |
+
and checkpoint_path is not None
|
| 397 |
+
and not self.resize_subject_layer
|
| 398 |
+
):
|
| 399 |
+
LOGGER.info(f"Loading model from {checkpoint_path}")
|
| 400 |
+
init_fn = BrainModule.load_from_checkpoint
|
| 401 |
+
init_kwargs = {"checkpoint_path": checkpoint_path, "strict": False}
|
| 402 |
+
else:
|
| 403 |
+
init_fn = BrainModule
|
| 404 |
+
init_kwargs = {}
|
| 405 |
+
|
| 406 |
+
metrics = {
|
| 407 |
+
split + "/" + metric.log_name: metric.build()
|
| 408 |
+
for metric in self.metrics
|
| 409 |
+
for split in ["val", "test"]
|
| 410 |
+
}
|
| 411 |
+
metrics = nn.ModuleDict(metrics)
|
| 412 |
+
pl_module = init_fn(
|
| 413 |
+
model=model,
|
| 414 |
+
loss=self.loss.build(),
|
| 415 |
+
optim_config=self.optim,
|
| 416 |
+
metrics=metrics,
|
| 417 |
+
config=ConfDict(self.model_dump()),
|
| 418 |
+
**init_kwargs,
|
| 419 |
+
)
|
| 420 |
+
|
| 421 |
+
if self.resize_subject_layer:
|
| 422 |
+
LOGGER.info("Resizing subject layer")
|
| 423 |
+
checkpoint = torch.load(checkpoint_path)
|
| 424 |
+
state_dict = checkpoint["state_dict"]
|
| 425 |
+
weights = state_dict["model.predictor.weights"]
|
| 426 |
+
_, in_channels, out_channels = weights.shape
|
| 427 |
+
n_subjects = self.brain_model_config.subject_layers.n_subjects
|
| 428 |
+
if self.brain_model_config.subject_layers.subject_dropout:
|
| 429 |
+
n_subjects += 1
|
| 430 |
+
if "model.predictor.bias" in state_dict:
|
| 431 |
+
bias = state_dict["model.predictor.bias"]
|
| 432 |
+
new_bias = torch.nn.Parameter(torch.zeros(n_subjects, out_channels))
|
| 433 |
+
new_bias.data[:] = bias.mean(dim=0).repeat(n_subjects, 1)
|
| 434 |
+
state_dict["model.predictor.bias"] = new_bias
|
| 435 |
+
if self.freeze_backbone:
|
| 436 |
+
for param in pl_module.parameters():
|
| 437 |
+
param.requires_grad = False
|
| 438 |
+
for param in pl_module.model.predictor.parameters():
|
| 439 |
+
param.requires_grad = True
|
| 440 |
+
if (
|
| 441 |
+
self.brain_model_config.low_rank_head is not None
|
| 442 |
+
and self.brain_model_config.low_rank_head != in_channels
|
| 443 |
+
):
|
| 444 |
+
r = self.brain_model_config.low_rank_head
|
| 445 |
+
if "model.low_rank_head.weight" in state_dict:
|
| 446 |
+
W1, W2 = (
|
| 447 |
+
state_dict["model.low_rank_head.weight"].cpu(),
|
| 448 |
+
state_dict["model.predictor.weights"].mean(dim=0).cpu(),
|
| 449 |
+
)
|
| 450 |
+
prod = torch.matmul(W1.t(), W2)
|
| 451 |
+
else:
|
| 452 |
+
prod = state_dict["model.predictor.weights"].mean(dim=0).cpu()
|
| 453 |
+
U, S, V = torch.svd(prod)
|
| 454 |
+
U = U[:, :r]
|
| 455 |
+
S = S[:r]
|
| 456 |
+
V = V[:, :r]
|
| 457 |
+
state_dict["model.low_rank_head.weight"] = U.t()
|
| 458 |
+
state_dict["model.predictor.weights"] = torch.matmul(
|
| 459 |
+
torch.diag(S), V.t()
|
| 460 |
+
).repeat(n_subjects, 1, 1)
|
| 461 |
+
if "model.predictor.bias" in state_dict:
|
| 462 |
+
state_dict["model.low_rank_head.bias"] = torch.zeros(r)
|
| 463 |
+
for param in pl_module.model.low_rank_head.parameters():
|
| 464 |
+
param.requires_grad = True
|
| 465 |
+
else:
|
| 466 |
+
state_dict["model.predictor.weights"] = weights.mean(dim=0).repeat(
|
| 467 |
+
n_subjects, 1, 1
|
| 468 |
+
)
|
| 469 |
+
pl_module.load_state_dict(state_dict, strict=False)
|
| 470 |
+
|
| 471 |
+
return pl_module
|
| 472 |
+
|
| 473 |
+
def _setup_trainer(
|
| 474 |
+
self, train_loader: DataLoader, override_n_devices: int | None = None
|
| 475 |
+
) -> tp.Any:
|
| 476 |
+
import lightning.pytorch as pl
|
| 477 |
+
from lightning.pytorch.callbacks import (
|
| 478 |
+
EarlyStopping,
|
| 479 |
+
LearningRateMonitor,
|
| 480 |
+
ModelCheckpoint,
|
| 481 |
+
)
|
| 482 |
+
|
| 483 |
+
batch = next(iter(train_loader))
|
| 484 |
+
feature_dims = {}
|
| 485 |
+
for modality in self.data.features_to_use:
|
| 486 |
+
if (
|
| 487 |
+
modality in batch.data and modality not in self.data.features_to_mask
|
| 488 |
+
): # B, L, D, T
|
| 489 |
+
if batch.data[modality].ndim == 4:
|
| 490 |
+
feature_dims[modality] = (
|
| 491 |
+
batch.data[modality].shape[1],
|
| 492 |
+
batch.data[modality].shape[2],
|
| 493 |
+
)
|
| 494 |
+
elif batch.data[modality].ndim == 3:
|
| 495 |
+
feature_dims[modality] = (
|
| 496 |
+
1,
|
| 497 |
+
batch.data[modality].shape[1],
|
| 498 |
+
)
|
| 499 |
+
else:
|
| 500 |
+
raise ValueError(
|
| 501 |
+
f"Unexpected number of dimensions for modality {modality}: {batch.data[modality].ndim}"
|
| 502 |
+
)
|
| 503 |
+
else:
|
| 504 |
+
feature_dims[modality] = None
|
| 505 |
+
if "fmri" in batch.data: # read from fmri config
|
| 506 |
+
fmri = batch.data["fmri"]
|
| 507 |
+
n_outputs = fmri.shape[1]
|
| 508 |
+
for metric in self.metrics:
|
| 509 |
+
if hasattr(metric, "kwargs") and "num_outputs" in metric.kwargs:
|
| 510 |
+
metric.kwargs["num_outputs"] = n_outputs
|
| 511 |
+
else: # read from neuro config
|
| 512 |
+
if hasattr(self.data.neuro.projection, "mesh"):
|
| 513 |
+
from neuralset.extractors.neuro import FSAVERAGE_SIZES
|
| 514 |
+
|
| 515 |
+
n_outputs = 2 * FSAVERAGE_SIZES[self.data.neuro.projection.mesh]
|
| 516 |
+
else:
|
| 517 |
+
raise ValueError(
|
| 518 |
+
f"Could not determine number of outputs for neuro extractor {self.data.neuro}"
|
| 519 |
+
)
|
| 520 |
+
brain_model = self.brain_model_config.build(
|
| 521 |
+
feature_dims=feature_dims,
|
| 522 |
+
n_outputs=n_outputs,
|
| 523 |
+
n_output_timesteps=self.data.duration_trs,
|
| 524 |
+
)
|
| 525 |
+
LOGGER.info("Extractor dims: %s", feature_dims)
|
| 526 |
+
input_data = brain_model.aggregate_features(batch)
|
| 527 |
+
LOGGER.info("Input shapes: %s", input_data.shape)
|
| 528 |
+
LOGGER.info("Target shapes: %s", n_outputs)
|
| 529 |
+
_ = brain_model(batch)
|
| 530 |
+
total_params = sum(p.numel() for p in brain_model.parameters())
|
| 531 |
+
LOGGER.info(f"Total parameters: {total_params}")
|
| 532 |
+
self._model = self._init_module(brain_model)
|
| 533 |
+
if self.monitor == "val/pearson":
|
| 534 |
+
mode = "max"
|
| 535 |
+
else:
|
| 536 |
+
mode = "min"
|
| 537 |
+
callbacks = [
|
| 538 |
+
LearningRateMonitor(logging_interval="epoch"),
|
| 539 |
+
]
|
| 540 |
+
if self.patience is not None:
|
| 541 |
+
callbacks.append(
|
| 542 |
+
EarlyStopping(monitor=self.monitor, mode=mode, patience=self.patience)
|
| 543 |
+
)
|
| 544 |
+
if self.save_checkpoints:
|
| 545 |
+
callbacks.append(
|
| 546 |
+
ModelCheckpoint(
|
| 547 |
+
save_last=True,
|
| 548 |
+
save_top_k=1,
|
| 549 |
+
dirpath=self.infra.folder,
|
| 550 |
+
filename=self.checkpoint_filename,
|
| 551 |
+
monitor=self.monitor,
|
| 552 |
+
mode=mode,
|
| 553 |
+
save_on_train_epoch_end=True,
|
| 554 |
+
)
|
| 555 |
+
)
|
| 556 |
+
|
| 557 |
+
trainer = pl.Trainer(
|
| 558 |
+
strategy="auto" if self.infra.gpus_per_node == 1 else "fsdp",
|
| 559 |
+
devices=override_n_devices or self.infra.gpus_per_node,
|
| 560 |
+
accelerator=self.accelerator,
|
| 561 |
+
max_epochs=self.n_epochs,
|
| 562 |
+
max_steps=self.max_steps,
|
| 563 |
+
limit_train_batches=self.limit_train_batches,
|
| 564 |
+
enable_progress_bar=self.enable_progress_bar,
|
| 565 |
+
log_every_n_steps=self.log_every_n_steps,
|
| 566 |
+
fast_dev_run=self.fast_dev_run,
|
| 567 |
+
callbacks=callbacks,
|
| 568 |
+
logger=self._logger,
|
| 569 |
+
enable_checkpointing=self.save_checkpoints,
|
| 570 |
+
accumulate_grad_batches=self.accumulate_grad_batches,
|
| 571 |
+
)
|
| 572 |
+
self._trainer = trainer
|
| 573 |
+
return trainer
|
| 574 |
+
|
| 575 |
+
def fit(self, train_loader: DataLoader, valid_loader: DataLoader) -> None:
|
| 576 |
+
self._trainer.fit(
|
| 577 |
+
model=self._model,
|
| 578 |
+
train_dataloaders=train_loader,
|
| 579 |
+
val_dataloaders=valid_loader,
|
| 580 |
+
ckpt_path=self._get_checkpoint_path(),
|
| 581 |
+
)
|
| 582 |
+
|
| 583 |
+
def test(self, test_loader: DataLoader) -> None:
|
| 584 |
+
if self.checkpoint_path:
|
| 585 |
+
ckpt_path = self.checkpoint_path
|
| 586 |
+
else:
|
| 587 |
+
if self.save_checkpoints:
|
| 588 |
+
ckpt_path = Path(self.infra.folder) / "best.ckpt"
|
| 589 |
+
else:
|
| 590 |
+
ckpt_path = None
|
| 591 |
+
self._trainer.test(
|
| 592 |
+
self._model,
|
| 593 |
+
dataloaders=test_loader,
|
| 594 |
+
ckpt_path=ckpt_path,
|
| 595 |
+
)
|
| 596 |
+
|
| 597 |
+
def setup_run(self):
|
| 598 |
+
|
| 599 |
+
if self.infra.cluster and self.infra.status() != "not submitted":
|
| 600 |
+
for out_type in ["stdout", "stderr"]:
|
| 601 |
+
old_path = Path(getattr(self.infra.job().paths, out_type))
|
| 602 |
+
new_path = Path(self.infra.folder) / f"log.{out_type}"
|
| 603 |
+
try:
|
| 604 |
+
if new_path.exists():
|
| 605 |
+
os.remove(new_path)
|
| 606 |
+
os.symlink(
|
| 607 |
+
old_path,
|
| 608 |
+
new_path,
|
| 609 |
+
)
|
| 610 |
+
except Exception:
|
| 611 |
+
pass
|
| 612 |
+
config_path = Path(self.infra.folder) / "config.yaml"
|
| 613 |
+
os.makedirs(self.infra.folder, exist_ok=True)
|
| 614 |
+
with open(config_path, "w") as outfile:
|
| 615 |
+
yaml.dump(
|
| 616 |
+
self.model_dump(),
|
| 617 |
+
outfile,
|
| 618 |
+
indent=4,
|
| 619 |
+
default_flow_style=False,
|
| 620 |
+
sort_keys=False,
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
@infra.apply
|
| 624 |
+
def run(self):
|
| 625 |
+
import lightning.pytorch as pl
|
| 626 |
+
|
| 627 |
+
self.setup_run()
|
| 628 |
+
self._logger = (
|
| 629 |
+
self.wandb_config.build(
|
| 630 |
+
save_dir=self.infra.folder,
|
| 631 |
+
xp_config=self.model_dump(),
|
| 632 |
+
id=f"{self.wandb_config.group}-{self.infra.uid().split('-')[-1]}",
|
| 633 |
+
)
|
| 634 |
+
if self.wandb_config
|
| 635 |
+
else None
|
| 636 |
+
)
|
| 637 |
+
|
| 638 |
+
if self.seed is not None:
|
| 639 |
+
pl.seed_everything(self.seed, workers=True)
|
| 640 |
+
np.random.seed(self.seed)
|
| 641 |
+
torch.manual_seed(self.seed)
|
| 642 |
+
|
| 643 |
+
loaders = self.data.get_loaders(
|
| 644 |
+
split_to_build="val" if self.test_only else None
|
| 645 |
+
)
|
| 646 |
+
self._setup_trainer(next(iter(loaders.values())))
|
| 647 |
+
|
| 648 |
+
if not self.test_only:
|
| 649 |
+
self.fit(loaders["train"], loaders["val"])
|
| 650 |
+
|
| 651 |
+
self.test(loaders["val"])
|
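As a side note on the windowing done in `Data.get_loaders` above: segments are listed with `duration = duration_trs * TR` and `stride = (duration_trs - overlap_trs) * TR`. A tiny standalone sketch with illustrative numbers (the 1.49 s TR is an assumption, not a project default):

```python
# Illustrative values only: 1.49 s TR, 40-TR windows, 10-TR overlap at validation.
tr_seconds = 1.49
duration_trs = 40
overlap_trs_train = 0
overlap_trs_val = 10

duration_s = duration_trs * tr_seconds                            # 59.6 s per segment
stride_train_s = (duration_trs - overlap_trs_train) * tr_seconds  # 59.6 s (no overlap)
stride_val_s = (duration_trs - overlap_trs_val) * tr_seconds      # 44.7 s (10 shared TRs)

print(duration_s, stride_train_s, stride_val_s)
```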
src/cortexlab/training/pl_module.py
ADDED
|
@@ -0,0 +1,155 @@
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""Custom lightning module that wraps a pytorch model."""

import typing as tp
from pathlib import Path

import lightning.pytorch as pl
from einops import rearrange
from neuralset.dataloader import SegmentData
from neuraltrain.optimizers import BaseOptimizer
from torch import nn
from torchmetrics import Metric


class BrainModule(pl.LightningModule):
    """Torch-lightning module for fMRI encoding model training."""

    def __init__(
        self,
        model: nn.Module,
        loss: nn.Module,
        optim_config: BaseOptimizer,
        metrics: dict[str, Metric],
        checkpoint_path: Path | None = None,
        config: dict[str, tp.Any] | None = None,
    ) -> None:
        super().__init__()
        self.model = model
        self.checkpoint_path = checkpoint_path
        self.config = config

        # Optimizer
        self.optim_config = optim_config

        self.loss = loss
        self.metrics = metrics

    def forward(self, batch):
        return self.model(batch)

    def on_save_checkpoint(self, checkpoint):
        checkpoint["model_build_args"] = {
            "feature_dims": self.model.feature_dims,
            "n_outputs": self.model.n_outputs,
            "n_output_timesteps": self.model.n_output_timesteps,
        }

    def _run_step(
        self, batch: SegmentData, batch_idx, step_name, dataloader_idx: int = 0
    ):
        y_true = batch.data["fmri"]  # B, D, T
        y_pred = self.forward(batch)  # B, D, T
        if step_name == "val":
            y_true = y_true[:, :, self.config["data.overlap_trs_val"] :]
            y_pred = y_pred[:, :, self.config["data.overlap_trs_val"] :]
        subject_ids_flat = batch.data["subject_id"].repeat_interleave(
            y_pred.shape[2], 0
        )

        y_pred_flat = rearrange(y_pred, "b d t -> (b t) d")
        y_true_flat = rearrange(y_true, "b d t -> (b t) d")
        if not self.config["data.stride_drop_incomplete"]:
            bad_indices = (y_true_flat == 0).all(dim=1)
            y_pred_flat = y_pred_flat[~bad_indices]
            y_true_flat = y_true_flat[~bad_indices]
            subject_ids_flat = subject_ids_flat[~bad_indices]

        loss = self.loss(y_pred_flat, y_true_flat).mean()
        log_kwargs = {
            "on_step": True if step_name == "train" else False,
            "on_epoch": True,
            "logger": True,
            "prog_bar": True,
            "batch_size": y_pred.shape[0],
        }

        self.log(
            f"{step_name}/loss",
            loss,
            **log_kwargs,
        )

        # Compute metrics
        for metric_name, metric in self.metrics.items():
            if metric_name.startswith(step_name):
                if "grouped" in metric.__class__.__name__.lower():
                    metric.update(y_pred_flat, y_true_flat, groups=subject_ids_flat)
                else:
                    if "retrieval" in metric_name:
                        metric.update(y_pred.mean(dim=-1), y_true.mean(dim=-1))
                    else:
                        metric.update(y_pred_flat, y_true_flat)
                    self.log(
                        metric_name,
                        metric,
                        **log_kwargs,
                    )
        return loss, y_pred.detach().cpu(), y_true.detach().cpu()

    def on_val_or_test_epoch_end(self, step_name: str) -> None:
        for metric_name, metric in self.metrics.items():
            if metric_name.startswith(step_name):
                if "grouped" in metric.__class__.__name__.lower():
                    subject_id_to_name = {
                        v: k
                        for k, v in self.config[
                            "data.subject_id.predefined_mapping"
                        ].items()
                    }
                    metric_dict = {
                        metric_name + "/" + subject_id_to_name[int(k)]: v
                        for k, v in metric.compute().items()
                    }
                    self.log_dict(metric_dict)
                    metric.reset()

    def on_validation_epoch_end(self) -> None:
        self.on_val_or_test_epoch_end("val")
        return super().on_validation_epoch_end()

    def on_test_epoch_end(self) -> None:
        self.on_val_or_test_epoch_end("test")
        return super().on_test_epoch_end()

    def training_step(self, batch: SegmentData, batch_idx):
        loss, _, _ = self._run_step(batch, batch_idx, step_name="train")
        return loss

    def validation_step(self, batch: SegmentData, batch_idx, dataloader_idx: int = 0):
        _, y_pred, y_true = self._run_step(
            batch, batch_idx, step_name="val", dataloader_idx=dataloader_idx
        )
        return y_pred, y_true

    def test_step(self, batch: SegmentData, batch_idx, dataloader_idx: int = 0):
        _, y_pred, y_true = self._run_step(
            batch, batch_idx, step_name="test", dataloader_idx=dataloader_idx
        )
        return y_pred, y_true

    def configure_optimizers(self):
        optim_config = self.optim_config.copy()
        unfrozen_params = [p for p in self.parameters() if p.requires_grad]
        if self.config["max_steps"] > 0:
            total_steps = self.config["max_steps"]
        else:
            total_steps = self.trainer.estimated_stepping_batches
        optimizer = optim_config.build(unfrozen_params, total_steps=total_steps)
        return optimizer
src/cortexlab/viz/__init__.py
ADDED
|
@@ -0,0 +1,26 @@
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from .base import BasePlotBrain
from .cortical import PlotBrainNilearn
from .cortical_pv import PlotBrainPyvista
from .subcortical import get_subcortical_roi_indices, plot_subcortical
from .utils import (
    combine_mosaics,
    convert_ax_to_2d,
    convert_ax_to_3d,
    get_cmap,
    get_pval_stars,
    label_ax,
    move_ax,
    plot_colorbar,
    plot_rgb_colorbar,
    saturate_colors,
    set_title,
    shrink_ax,
)

PlotBrain = PlotBrainPyvista
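A hedged usage sketch for the plotting entry points exported here, based on `BasePlotBrain`'s fields and the `plot_surf(..., axes=..., views=...)` calls visible in `base.py`. The vertex count matches fsaverage5 (10242 vertices per hemisphere), but the exact defaults and return value of `PlotBrainPyvista.plot_surf` are assumptions:

```python
import matplotlib.pyplot as plt
import numpy as np

from cortexlab.viz import PlotBrain  # alias for PlotBrainPyvista

# fsaverage5: 10242 vertices per hemisphere -> 20484 values for both hemispheres.
stat_map = np.random.rand(2 * 10242)

plotter = PlotBrain(mesh="fsaverage5", inflate="half", bg_map="sulcal")

fig, ax = plt.subplots(figsize=(4, 3))
# Assumed call pattern, mirroring BasePlotBrain.plot_timesteps().
plotter.plot_surf(stat_map, axes=ax, views="left")
fig.savefig("stat_map_left.png", bbox_inches="tight")
```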
src/cortexlab/viz/base.py
ADDED
|
@@ -0,0 +1,497 @@
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
import typing as tp
|
| 8 |
+
from functools import lru_cache
|
| 9 |
+
|
| 10 |
+
import matplotlib
|
| 11 |
+
import nibabel as nib
|
| 12 |
+
import numpy as np
|
| 13 |
+
import pydantic
|
| 14 |
+
from neuralset.extractors.neuro import FSAVERAGE_SIZES
|
| 15 |
+
from nilearn import datasets, image, maskers, surface
|
| 16 |
+
from scipy.spatial import cKDTree
|
| 17 |
+
|
| 18 |
+
cached_fetch_surf_fsaverage = lru_cache(datasets.fetch_surf_fsaverage)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class BasePlotBrain(pydantic.BaseModel):
|
| 22 |
+
mesh: (
|
| 23 |
+
tp.Literal["fsaverage3", "fsaverage4", "fsaverage5", "fsaverage6", "fsaverage7"]
|
| 24 |
+
| None
|
| 25 |
+
) = "fsaverage5"
|
| 26 |
+
inflate: bool | tp.Literal["half"] = "half"
|
| 27 |
+
bg_map: tp.Literal["sulcal", "curvature", "thresholded"] = "sulcal"
|
| 28 |
+
hemisphere_gap: float = 0
|
| 29 |
+
atlas_name: str | None = None
|
| 30 |
+
atlas_dim: int | None = None
|
| 31 |
+
vol_to_surf_kwargs: dict | None = None
|
| 32 |
+
model_config = pydantic.ConfigDict(extra="forbid")
|
| 33 |
+
|
| 34 |
+
VIEW_DICT: tp.ClassVar[dict] = {}
|
| 35 |
+
|
| 36 |
+
def model_post_init(self, __context: tp.Any) -> None:
|
| 37 |
+
self._mesh = self.get_mesh()
|
| 38 |
+
|
| 39 |
+
# ------------------------------------------------------------------
|
| 40 |
+
# Axes helpers
|
| 41 |
+
# ------------------------------------------------------------------
|
| 42 |
+
|
| 43 |
+
def get_axarr_and_views(self, axes, views):
|
| 44 |
+
if isinstance(axes, dict):
|
| 45 |
+
axes = {k: self._convert_ax(ax) for k, ax in axes.items()}
|
| 46 |
+
if all(k in self.VIEW_DICT for k in axes):
|
| 47 |
+
views, axarr = zip(*axes.items())
|
| 48 |
+
else:
|
| 49 |
+
axarr = list(axes.values())
|
| 50 |
+
elif isinstance(axes, (list, np.ndarray)):
|
| 51 |
+
axarr = axes
|
| 52 |
+
elif isinstance(axes, matplotlib.axes.Axes):
|
| 53 |
+
axarr = [axes]
|
| 54 |
+
assert len(views) == len(
|
| 55 |
+
axarr
|
| 56 |
+
), f"Number of views and axes must match, got {len(views)} and {len(axarr)}"
|
| 57 |
+
return views, axarr
|
| 58 |
+
|
| 59 |
+
def _convert_ax(self, ax):
|
| 60 |
+
"""Hook for subclasses that need to convert axes (e.g. 3D -> 2D)."""
|
| 61 |
+
return ax
|
| 62 |
+
|
| 63 |
+
# ------------------------------------------------------------------
|
| 64 |
+
# Atlas / volume-to-surface helpers
|
| 65 |
+
# ------------------------------------------------------------------
|
| 66 |
+
|
| 67 |
+
def get_atlas(self):
|
| 68 |
+
if not hasattr(self, "_atlas"):
|
| 69 |
+
if self.atlas_name == "schaefer_2018":
|
| 70 |
+
atlas = datasets.fetch_atlas_schaefer_2018(n_rois=self.atlas_dim)
|
| 71 |
+
elif self.atlas_name == "difumo":
|
| 72 |
+
atlas = datasets.fetch_atlas_difumo(dimension=self.atlas_dim)
|
| 73 |
+
self._atlas = atlas
|
| 74 |
+
return self._atlas
|
| 75 |
+
|
| 76 |
+
@property
|
| 77 |
+
def atlas_masker(self):
|
| 78 |
+
if not hasattr(self, "_atlas_masker"):
|
| 79 |
+
atlas = self.get_atlas()
|
| 80 |
+
if self.atlas_name == "schaefer_2018":
|
| 81 |
+
atlas_masker = maskers.NiftiLabelsMasker(labels_img=atlas["maps"])
|
| 82 |
+
elif self.atlas_name == "difumo":
|
| 83 |
+
atlas_masker = maskers.NiftiMapsMasker(maps_img=atlas["maps"])
|
| 84 |
+
atlas_masker.fit()
|
| 85 |
+
self._atlas_masker = atlas_masker
|
| 86 |
+
return self._atlas_masker
|
| 87 |
+
|
| 88 |
+
def atlas_to_surf(self, signals, img_threshold: float | None = None):
|
| 89 |
+
signals_nii = self.signals_to_nii(signals)
|
| 90 |
+
return self.vol_to_surf(signals_nii, img_threshold=img_threshold)
|
| 91 |
+
|
| 92 |
+
def vol_to_surf(self, signals_nii, img_threshold: float | None = None):
|
| 93 |
+
vol_to_surf_kwargs = self.vol_to_surf_kwargs or {}
|
| 94 |
+
if img_threshold is not None:
|
| 95 |
+
signals_nii = image.threshold_img(
|
| 96 |
+
signals_nii,
|
| 97 |
+
threshold=img_threshold,
|
| 98 |
+
copy=False,
|
| 99 |
+
copy_header=True,
|
| 100 |
+
)
|
| 101 |
+
fsaverage = cached_fetch_surf_fsaverage(mesh=self.mesh)
|
| 102 |
+
hemis = [
|
| 103 |
+
surface.vol_to_surf(
|
| 104 |
+
signals_nii,
|
| 105 |
+
surf_mesh=fsaverage[f"pial_{hemi}"],
|
| 106 |
+
kind="ball",
|
| 107 |
+
**vol_to_surf_kwargs,
|
| 108 |
+
)
|
| 109 |
+
for hemi in ("left", "right")
|
| 110 |
+
]
|
| 111 |
+
return np.concatenate(hemis)
|
| 112 |
+
|
| 113 |
+
def signals_to_nii(self, signals):
|
| 114 |
+
out = self.atlas_masker.inverse_transform(signals)
|
| 115 |
+
if isinstance(self.atlas_masker, maskers.NiftiMapsMasker):
|
| 116 |
+
data = out.get_fdata()
|
| 117 |
+
lo, hi = signals.min(), signals.max()
|
| 118 |
+
data = (data - data.min()) / (data.max() - data.min())
|
| 119 |
+
data = data * (hi - lo) + lo
|
| 120 |
+
out = nib.Nifti1Image(data, out.affine, out.header)
|
| 121 |
+
return out
|
| 122 |
+
|
| 123 |
+
# ------------------------------------------------------------------
|
| 124 |
+
# Mesh loading (eager – called once in model_post_init)
|
| 125 |
+
# ------------------------------------------------------------------
|
| 126 |
+
|
| 127 |
+
def get_mesh(self) -> dict:
|
| 128 |
+
"""Load mesh geometry and background maps for both hemispheres.
|
| 129 |
+
|
| 130 |
+
Returns a dict with keys ``'left'``, ``'right'``, ``'both'``,
|
| 131 |
+
each mapping to ``{'coords': array, 'faces': array, 'bg_map': array}``.
|
| 132 |
+
The ``'both'`` entry has hemisphere_gap applied.
|
| 133 |
+
"""
|
| 134 |
+
fs_out = cached_fetch_surf_fsaverage(self.mesh)
|
| 135 |
+
|
| 136 |
+
out = {}
|
| 137 |
+
for hemi in ("left", "right"):
|
| 138 |
+
infl_out_xyz, _ = nib.load(getattr(fs_out, f"infl_{hemi}")).darrays
|
| 139 |
+
pial_xyz, faces = nib.load(getattr(fs_out, f"pial_{hemi}")).darrays
|
| 140 |
+
|
| 141 |
+
alpha = 0.5
|
| 142 |
+
jr_xyz = infl_out_xyz.data * alpha + (1 - alpha) * pial_xyz.data
|
| 143 |
+
if self.inflate == "half":
|
| 144 |
+
coords = jr_xyz
|
| 145 |
+
elif self.inflate is True:
|
| 146 |
+
coords = infl_out_xyz.data
|
| 147 |
+
elif self.inflate is False:
|
| 148 |
+
coords = pial_xyz.data
|
| 149 |
+
|
| 150 |
+
bg_key = "curv" if self.bg_map == "curvature" else "sulc"
|
| 151 |
+
bg_map = nib.load(getattr(fs_out, f"{bg_key}_{hemi}")).darrays[0].data
|
| 152 |
+
if self.bg_map == "thresholded":
|
| 153 |
+
bg_map = 1.0 * (bg_map > -0.10)
|
| 154 |
+
bg_map[-1] = -5
|
| 155 |
+
bg_map[-2] = 2.0
|
| 156 |
+
if hemi == "left":
|
| 157 |
+
coords[:, 0] = coords[:, 0] - coords[:, 0].max() - self.hemisphere_gap
|
| 158 |
+
else:
|
| 159 |
+
coords[:, 0] = coords[:, 0] - coords[:, 0].min() + self.hemisphere_gap
|
| 160 |
+
|
| 161 |
+
out[hemi] = dict(coords=coords, faces=faces.data, bg_map=bg_map)
|
| 162 |
+
|
| 163 |
+
out["both"] = dict(
|
| 164 |
+
coords=np.r_[out["left"]["coords"], out["right"]["coords"]],
|
| 165 |
+
faces=np.r_[
|
| 166 |
+
out["left"]["faces"],
|
| 167 |
+
out["right"]["faces"] + out["left"]["faces"].max() + 1,
|
            ],
            bg_map=np.r_[out["left"]["bg_map"], out["right"]["bg_map"]],
        )

        return out

    # ------------------------------------------------------------------
    # Stat-map upsampling (lazy – called per data array)
    # ------------------------------------------------------------------

    def get_stat_map(self, data: np.ndarray) -> dict:
        """Split vertex data into hemispheres, upsampling if needed.

        Returns ``{'left': array, 'right': array, 'both': array}``.
        """
        in_mesh = None
        for name, size in FSAVERAGE_SIZES.items():
            if data.shape[0] // 2 == size:
                in_mesh = name
                break
        if in_mesh is None:
            raise ValueError(f"Incoherent number of vertices: {data.shape[0]}")

        left = data[: len(data) // 2]
        right = data[len(data) // 2 :]

        if in_mesh != self.mesh:
            fs_in = cached_fetch_surf_fsaverage(in_mesh)
            fs_out = cached_fetch_surf_fsaverage(self.mesh)
            resampled = {}
            for hemi, values in (("left", left), ("right", right)):
                infl_in_xyz, _ = nib.load(getattr(fs_in, f"infl_{hemi}")).darrays
                infl_out_xyz, _ = nib.load(getattr(fs_out, f"infl_{hemi}")).darrays
                tree = cKDTree(infl_in_xyz.data)
                distances, indices = tree.query(infl_out_xyz.data, k=5)
                if "int" in data.dtype.name:
                    # get most frequent
                    resampled[hemi] = np.apply_along_axis(
                        lambda x: np.bincount(x).argmax(), axis=1, arr=values[indices]
                    )
                else:
                    distances = np.where(distances == 0, 1e-12, distances)
                    weights = 1 / distances
                    weights = weights / weights.sum(axis=1, keepdims=True)
                    resampled[hemi] = np.sum(values[indices] * weights, axis=1)
            left, right = resampled["left"], resampled["right"]

        return dict(left=left, right=right, both=np.r_[left, right])

    def get_hemis(self, data: np.ndarray) -> dict:
        """Convenience: combine ``self._mesh`` geometry with stat-map data."""
        stat_maps = self.get_stat_map(data)
        out = {}
        for hemi in ("left", "right", "both"):
            m = self._mesh[hemi]
            out[hemi] = dict(
                stat_map=stat_maps[hemi],
                surf_mesh=(m["coords"], m["faces"]),
                bg_map=m["bg_map"],
                hemi=hemi,
            )
        return out

    # ------------------------------------------------------------------
    # Multi-timestep plotting
    # ------------------------------------------------------------------

    def plot_timesteps(
        self,
        neuro: np.ndarray | dict[str, np.ndarray],
        segments=None,
        *,
        plot_every_k_timesteps: int = 1,
        trues=None,
        norm_percentile=None,
        show_stimuli=False,
        views: str | dict[str, str] = "left",
        timestamps: list[float] | None = None,
        **kwargs,
    ):
        import matplotlib.pyplot as plt
        from tqdm import tqdm

        from cortexlab.viz.utils import robust_normalize

        TEXT_KEY, SOUND_KEY, VIDEO_KEY = "Text", "Audio", "Video"

        if isinstance(neuro, np.ndarray):
            neuro = {"Brain response": neuro}
        assert all(
            v.ndim == 2 for v in neuro.values()
        ), "Neuro must be a dictionary of 2D arrays"
        if isinstance(views, dict):
            assert all(
                key in views.keys() for key in neuro.keys()
            ), f"Views keys {views.keys()} do not match neuro keys {neuro.keys()}"
        total_n_timesteps = len(list(neuro.values())[0])
        assert (
            total_n_timesteps % plot_every_k_timesteps == 0
        ), f"Total number of timesteps {total_n_timesteps} must be divisible by plot_every_k_timesteps {plot_every_k_timesteps}"
        neuro = {k: v[::plot_every_k_timesteps] for k, v in neuro.items()}
        n_timesteps = len(list(neuro.values())[0])
        if timestamps is None:
            timestamps = range(
                0, n_timesteps * plot_every_k_timesteps, plot_every_k_timesteps
            )
        else:
            assert (
                len(timestamps) == n_timesteps
            ), f"Number of timestamps {len(timestamps)} must match number of timesteps {n_timesteps}"
        if norm_percentile is not None:
            neuro = {
                k: robust_normalize(v, percentile=norm_percentile)
                for k, v in neuro.items()
            }

        mosaic = [[f"{k}_{i}" for i in range(n_timesteps)] for k in neuro]
        height_ratios = [1 for _ in neuro]
        if show_stimuli:
            from cortexlab.viz.utils import get_clip

            has_image = any(get_clip(segment) is not None for segment in segments)
            stimuli_mosaic = [
                [SOUND_KEY] * n_timesteps,
                [TEXT_KEY] * n_timesteps,
            ]
            stimuli_height_ratios = [0.3, 0.3]
            if has_image:
                stimuli_mosaic = [
                    [f"{VIDEO_KEY}_{i}" for i in range(n_timesteps)]
                ] + stimuli_mosaic
                stimuli_height_ratios = [0.7] + stimuli_height_ratios
            mosaic = stimuli_mosaic + mosaic
            height_ratios = stimuli_height_ratios + height_ratios

        fig, axes = plt.subplot_mosaic(
            mosaic,
            height_ratios=height_ratios,
            figsize=(2.5 * n_timesteps, 2 * sum(height_ratios)),
            gridspec_kw={"wspace": 0.0, "hspace": 0},
        )
        for k, ax in axes.items():
            if (
                k.startswith(TEXT_KEY)
                or k.startswith(SOUND_KEY)
                or k.startswith(VIDEO_KEY)
            ):
                fig.delaxes(ax)
                axes[k] = fig.add_subplot(ax.get_subplotspec())

        for i in tqdm(range(n_timesteps), desc="Plotting..."):
            for j, (key, value) in enumerate(neuro.items()):
                self.plot_surf(
                    value[i],
                    axes=axes[f"{key}_{i}"],
                    views=views[key] if isinstance(views, dict) else views,
                    **kwargs,
                )
                if j == len(neuro) - 1:
                    title = (
                        f"t={timestamps[i]}s" if timestamps is not None else f"t={i}s"
                    )
                    fig.text(
                        0.5,
                        -0.1,
                        title,
                        transform=axes[f"{key}_{i}"].transAxes,
                        ha="center",
                        va="center",
                    )

        if show_stimuli:
            self.plot_stimuli(
                segments, axes, plot_every_k_timesteps=plot_every_k_timesteps
            )

        first_neuro_keys = [key + "_0" for key in list(neuro.keys())]
        left, full_width = (
            axes[first_neuro_keys[0]].get_position().x0,
            fig.get_figwidth(),
        )
        for key, label in zip(
            first_neuro_keys + [TEXT_KEY, SOUND_KEY, f"{VIDEO_KEY}_0"],
            list(neuro.keys()) + [TEXT_KEY, SOUND_KEY, VIDEO_KEY],
        ):
            if key not in axes:
                continue
            pos = axes[key].get_position()
            fig.text(
                left,
                (pos.y0 + pos.y1) / 2,
                label + "\n\n\n",
                rotation="vertical",
                va="center",
                ha="center",
                transform=fig.transFigure,
            )
        return fig

    @staticmethod
    def plot_stimuli(
        segments,
        axes,
        plot_every_k_timesteps=1,
    ):
        import matplotlib.pyplot as plt

        from cortexlab.viz.utils import get_audio, get_clip

        TEXT_KEY, SOUND_KEY, VIDEO_KEY = "Text", "Audio", "Video"

        audio = get_audio(
            segments[0], stop_offset=(len(segments) - 1) * segments[0].duration
        )
        soundarray = audio.to_soundarray().mean(axis=1)
        axes[SOUND_KEY].plot(soundarray, color="k")
        axes[SOUND_KEY].set_xlim(0, len(soundarray))
        axes[SOUND_KEY].axis("off")
        axes[TEXT_KEY].axis("off")
        full_start, full_duration = (
            segments[0].start,
            len(segments) * segments[0].duration,
        )

        for i, segment in enumerate(segments):
            if f"{VIDEO_KEY}_0" in axes and i % plot_every_k_timesteps == 0:
                ax_idx = i // plot_every_k_timesteps
                img = get_clip(segment).get_frame(0)
                margin = img.shape[1] * 0.0
                ax = axes[f"{VIDEO_KEY}_{ax_idx}"]
                im = ax.imshow(img)
                patch = plt.matplotlib.patches.FancyBboxPatch(
                    (0, 0),
                    img.shape[1],
                    img.shape[0],
                    boxstyle="round,pad=0,rounding_size=200",
                    transform=ax.transData,
                    clip_on=False,
                    facecolor="none",
                    edgecolor="none",
                )
                ax.add_patch(patch)
                im.set_clip_path(patch)
                ax.set_xlim(-margin, img.shape[1] + margin)
                ax.set_ylim(img.shape[0] + margin, -margin)
                ax.axis("off")
            events = segment.events
            words = events[events.type == "Word"]
            for word in words.itertuples():
                if word.start < full_start:
                    continue
                axes[TEXT_KEY].text(
                    (word.start - full_start) / full_duration,
                    0.5,
                    word.text,
                    color="k",
                    transform=axes[TEXT_KEY].transAxes,
                    ha="center",
                    va="center",
                    rotation=45,
                    fontsize=10,
                )

    def plot_timesteps_mp4(
        self,
        neuro,
        filepath,
        *,
        segments=None,
        suptitle=None,
        interpolated_fps=None,
        norm_percentile=100,
        **plot_kwargs,
    ):
        import subprocess
        from pathlib import Path

        import matplotlib.pyplot as plt
        from tqdm import tqdm

        filepath = Path(filepath)
        tmp_dir = filepath.parent / "tmp"
        tmp_dir.mkdir(parents=True, exist_ok=True)
        for i in tqdm(range(len(neuro)), desc="Plotting..."):
            out_fig, ax = plt.subplots(1, 1, figsize=(3, 3))
            self.plot_surf(
                neuro[i],
                axes=[ax],
                **plot_kwargs,
            )
            title = suptitle or f"t = {i}s"
            out_fig.suptitle(title, fontsize=14, fontweight="bold")
            if segments:
                from cortexlab.viz.utils import get_text

                words = " ".join(get_text(segments[i]).split(" ")[-8:])
                out_fig.text(0.1, 0.92, words, fontsize=9, ha="left", va="top")
            tmp_fig = tmp_dir / f"tmp_{i:05d}.png"
            out_fig.savefig(tmp_fig, dpi=300)
            plt.close(out_fig)
        cmd = [
            "ffmpeg",
            "-y",
            "-framerate",
            str(1),
            "-i",
            f"{str(tmp_dir)}/tmp_%05d.png",
        ]
        if interpolated_fps is not None:
            cmd.append("-vf")
            cmd.append(f"minterpolate=fps={interpolated_fps}")
        cmd.extend(
            [
                "-c:v",
                "libx264",
                "-crf",
                "18",
                "-pix_fmt",
                "yuv420p",
                str(filepath),
            ]
        )
        subprocess.run(cmd)

    # ------------------------------------------------------------------
    # Rendering (subclasses must implement)
    # ------------------------------------------------------------------

    def plot_surf(self, *args, **kwargs):
        raise NotImplementedError
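For reference, the resampling inside get_stat_map is a k-nearest-neighbour lookup between two meshes with inverse-distance weighting (plus a most-frequent vote for integer label maps). A minimal, self-contained sketch of the continuous case, using synthetic coordinates rather than the fsaverage meshes used above:

import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
src_xyz = rng.normal(size=(1000, 3))   # vertex coordinates of the source mesh (synthetic)
dst_xyz = rng.normal(size=(2500, 3))   # vertex coordinates of the target mesh (synthetic)
values = rng.normal(size=1000)         # one scalar per source vertex

tree = cKDTree(src_xyz)
distances, indices = tree.query(dst_xyz, k=5)            # 5 nearest source vertices per target vertex
distances = np.where(distances == 0, 1e-12, distances)   # avoid division by zero for exact matches
weights = 1.0 / distances
weights /= weights.sum(axis=1, keepdims=True)            # convex combination of the 5 neighbours
resampled = np.sum(values[indices] * weights, axis=1)    # shape (2500,), one value per target vertex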
src/cortexlab/viz/cortical.py
ADDED
@@ -0,0 +1,311 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import typing as tp
from pathlib import Path

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from neuralset.extractors.neuro import FSAVERAGE_SIZES
from nilearn.datasets import load_fsaverage
from nilearn.plotting import plot_surf_roi, plot_surf_stat_map

from cortexlab.data.loader import get_hcp_roi_indices

from .base import BasePlotBrain
from .utils import get_cmap, get_scalar_mappable, robust_normalize, saturate_colors

VIEW_DICT = {
    "left": (0, 180),
    "right": (0, 0),
    "medial_left": (0, 0),
    "medial_right": (0, 180),
    "dorsal": (90, 0),
    "ventral": (-90, 0),
    "anterior": (0, 90),
    "posterior": (0, -90),
    "posterior_left": (0, -135),
    "posterior_right": (0, -45),
    "posterior_ventral": (-45, -90),
    "posterior_ventral_left": (-10, -135),
}


class PlotBrainNilearn(BasePlotBrain):

    VIEW_DICT: tp.ClassVar[dict] = VIEW_DICT

    def get_fig_axes(self, views):
        if isinstance(views, str):
            views = [views]
        n_rows, n_cols = (1, len(views)) if len(views) <= 4 else (2, len(views) // 2)
        fig, axarr = plt.subplots(
            n_rows,
            n_cols,
            figsize=(2 * n_cols, 2 * n_rows),
            subplot_kw={"projection": "3d"},
            gridspec_kw={"wspace": 0, "hspace": -0.2},
        )
        if len(views) == 1:
            axarr = [axarr]
        else:
            axarr = axarr.flatten()
        return fig, axarr

    def plot_surf(
        self,
        signals: np.ndarray,
        norm_percentile=None,
        colorbar_title: str | None = None,
        alpha_cmap: tp.Tuple[float, float] | None = None,
        axes: tp.Any | None = None,
        colorbar_kwargs: dict | None = None,
        views: str | list[str] | list[tuple[int, int]] = "left",
        annotated_rois: list[str] | None = None,
        vmin: float | None = None,
        vmax: float | None = None,
        symmetric_cbar: bool = False,
        threshold: float | None = None,
        cmap: str = "hot",
        colorbar: bool = False,
    ):
        if isinstance(views, str):
            views = [views]
        if axes is None:
            fig, axarr = self.get_fig_axes(views=views)
        else:
            views, axarr = self.get_axarr_and_views(axes, views)
            fig = None

        if self.atlas_name is not None:
            signals = self.atlas_to_surf(signals)
        elif signals.ndim == 3:
            signals = self.vol_to_surf(signals)
        assert (
            signals.shape[0] // 2 in FSAVERAGE_SIZES.values()
        ), f"Incoherent number of vertices: {signals.shape[0]}"
        if norm_percentile is not None:
            signals = robust_normalize(signals, percentile=norm_percentile)
        hemis = self.get_hemis(signals)
        if str(signals.dtype).startswith("int"):
            plot_fn = plot_surf_roi
            for k in hemis:
                hemis[k]["roi_map"] = hemis[k].pop("stat_map")
            sm = None
        else:
            plot_fn = plot_surf_stat_map
            cmap = get_cmap(cmap, alpha_cmap=alpha_cmap)
            sm = get_scalar_mappable(
                signals,
                cmap,
                vmin=vmin,
                vmax=vmax,
                threshold=threshold,
                symmetric_cbar=symmetric_cbar,
            )
        for i, (view, ax) in enumerate(zip(views, axarr)):
            selected_hemi = (
                "left"
                if view in ["left", "medial_left"]
                else "right" if view in ["right", "medial_right"] else "both"
            )
            if isinstance(view, str):
                view = VIEW_DICT[view]
            plot_kwargs = {
                "axes": ax,
                "view": view,
                "figure": fig,
                "bg_on_data": (
                    False
                    if (alpha_cmap is not None or plot_fn == plot_surf_roi)
                    else True
                ),
                "cmap": cmap,
                "vmin": vmin,
                "vmax": vmax,
                "threshold": threshold,
                "colorbar": False,
            }
            if plot_fn == plot_surf_stat_map:
                plot_kwargs["symmetric_cbar"] = symmetric_cbar
            plot_fn(**hemis[selected_hemi], **plot_kwargs)
            if annotated_rois is not None:
                self.annotate_rois(ax, annotated_rois, hemi=selected_hemi)
            ax.set_box_aspect(None, zoom=1.4)

        if colorbar:
            if fig is None:
                cbar = plt.colorbar(
                    sm,
                    format="{x:0.2f}",
                    label=colorbar_title,
                    ax=axarr[-1],
                    **colorbar_kwargs if colorbar_kwargs is not None else {},
                    shrink=0.5,
                )
            else:
                cb_ax = fig.add_axes([0.9, 0.2, 0.02, 0.6])
                cbar = fig.colorbar(
                    sm,
                    format="{x:0.2f}",
                    label=colorbar_title,
                    cax=cb_ax,
                    **colorbar_kwargs if colorbar_kwargs is not None else {},
                )
        return sm

    def plot_surf_rgb(
        self,
        signals: tp.List[np.ndarray],
        alpha_signals: np.ndarray | None = None,
        norm_percentile=95,
        alpha_bg=0,
        cmap: tp.Literal["rgb", "rgb_argmax", "tab10"] = "rgb",
        saturation_factor: None | float = None,
        save_path: str | None = None,
        axes: tp.List[matplotlib.axes.Axes] | None = None,
        views: list[str] | list[tuple[int, int]] = ["left"],
        bg_on_data=False,
    ):
        if isinstance(views, str):
            views = [views]
        if axes is None:
            fig, axarr = self.get_fig_axes(views=views)
        else:
            views, axarr = self.get_axarr_and_views(axes, views)
            fig = None

        fsaverage_meshes = load_fsaverage(mesh=self.mesh)
        if self.atlas_name is not None:
            signals = [self.atlas_to_surf(signal) for signal in signals]
        elif signals[0].ndim == 4:
            signals = [self.vol_to_surf(signal) for signal in signals]
        for signal in signals:
            assert (
                signal.shape[0] // 2 in FSAVERAGE_SIZES.values()
            ), f"Incoherent number of vertices: {signal.shape[0]//2}"
        hemis = [self.get_hemis(signal) for signal in signals]
        if alpha_signals is not None:
            alpha_hemis = self.get_hemis(alpha_signals)
        data = dict()
        for selected_hemis in ("left", "right", "both"):
            vertices, faces = hemis[0][selected_hemis]["surf_mesh"]
            colors = np.stack(
                [hemi[selected_hemis]["stat_map"] for hemi in hemis], axis=1
            )
            if cmap.startswith("rgb"):
                if len(signals) == 2:
                    colors = np.concatenate(
                        [colors, np.zeros((colors.shape[0], 1))], axis=1
                    )
                assert colors.shape[1] == 3
                if "argmax" in cmap:
                    colors = robust_normalize(colors, axis=1, percentile=100)
                    func = np.vectorize(lambda color: 0 if color < 1 else 1)
                    colors = func(colors)
                if norm_percentile is not None:
                    colors = robust_normalize(
                        colors, percentile=norm_percentile, two_sided=False
                    )
                if saturation_factor is not None:
                    colors = saturate_colors(colors, saturation_factor)
                colors = np.concatenate([colors, np.ones((colors.shape[0], 1))], axis=1)
            else:
                indices = np.argmax(colors, axis=1)
                cm = get_cmap(cmap)
                colors = cm(indices - 1)
                colors[indices == 0, :3] = np.zeros_like(colors[indices == 0, :3])
            if alpha_signals is not None:
                alpha = alpha_hemis[selected_hemis]["stat_map"]
                alpha_bg = 1 - alpha[:, None]

            bg = hemis[0][selected_hemis]["bg_map"]
            cmap_bg = plt.get_cmap("gray_r")
            bg = robust_normalize(bg, percentile=100)
            bg = cmap_bg(bg)
            if bg_on_data:
                colors[:, :3] = colors[:, :3] * bg[:, :3]
            else:
                colors[:, :3] = colors[:, :3] * (1 - alpha_bg) + bg[:, :3] * alpha_bg
            face_colors = np.mean(colors[faces], axis=1)
            data[selected_hemis] = dict(
                vertex_colors=colors,
                face_colors=face_colors,
                vertices=vertices,
                faces=faces,
            )

        for view, ax in zip(views, axarr):
            selected_hemis = (
                "left" if "left" in view else "right" if "right" in view else "both"
            )
            colors = data[selected_hemis]["face_colors"]
            vertices = data[selected_hemis]["vertices"]
            faces = data[selected_hemis]["faces"]

            p3dcollec = ax.plot_trisurf(
                vertices[:, 0],
                vertices[:, 1],
                vertices[:, 2],
                triangles=faces,
                linewidth=0.1,
                antialiased=False,
                color="white",
            )
            ax.set_box_aspect(None, zoom=1.4)
            limits = [vertices.min(), vertices.max()]
            ax.set_xlim(*limits)
            ax.set_ylim(*limits)
            p3dcollec.set_facecolors(colors)
            ax.set_axis_off()
            ax.view_init(*VIEW_DICT[view])
        if save_path is not None:
            save_path = Path(save_path)
            save_path.parent.mkdir(parents=True, exist_ok=True)
            np.save(save_path.with_suffix(".npy"), colors)

        return data["both"]["vertex_colors"]

    def save_gif(self, ax, save_path: str | None = None):
        import matplotlib.animation as animation

        if save_path is None:
            save_path = "rgb_animation.gif"

        angles = np.linspace(0, 360, 100, endpoint=False)

        def animate(i):
            ax.view_init(elev=0, azim=angles[i])
            return (ax,)

        from matplotlib.animation import FuncAnimation

        ani = FuncAnimation(ax.figure, animate, frames=len(angles), interval=30)
        writer = animation.PillowWriter(fps=30, bitrate=1800)
        ani.save(save_path, writer=writer)

    def annotate_rois(
        self,
        ax,
        rois: str | list[str] | dict[str, list[str]],
        hemi: str = "left",
        **kwargs,
    ):
        if isinstance(rois, str):
            rois = [rois]
        assert hemi in ["left", "right"]
        data = np.zeros(2 * FSAVERAGE_SIZES[self.mesh])
        vertices = self.get_hemis(data)["both"]["surf_mesh"][0]
        if hemi == "left":
            vertices = vertices[: FSAVERAGE_SIZES[self.mesh]]
        else:
            vertices = vertices[FSAVERAGE_SIZES[self.mesh] :]
        for roi in rois:
            vertex_indices = get_hcp_roi_indices(roi, mesh=self.mesh, hemi=hemi)
            roi_center = vertices[vertex_indices].mean(axis=0)
            roi_name = rois[roi] if isinstance(rois, dict) else roi
            ax.text(roi_center[0], roi_center[1], roi_center[2], roi_name, **kwargs)
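A minimal usage sketch for the nilearn backend defined above. The constructor keywords below (mesh, atlas_name) are assumptions inferred from the attributes the class reads (self.mesh, self.atlas_name); the real signature lives in BasePlotBrain. fsaverage5 is assumed, i.e. 10,242 vertices per hemisphere:

import numpy as np
from cortexlab.viz.cortical import PlotBrainNilearn

# Hypothetical constructor arguments: adapt to the actual BasePlotBrain signature.
plotter = PlotBrainNilearn(mesh="fsaverage5", atlas_name=None)

# Both hemispheres concatenated: 2 x 10242 vertices for fsaverage5.
signals = np.random.rand(20484)

sm = plotter.plot_surf(
    signals,
    views=["left", "medial_left"],
    cmap="hot",
    colorbar=True,
    norm_percentile=99,
)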
src/cortexlab/viz/cortical_pv.py
ADDED
@@ -0,0 +1,280 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import tempfile
import typing as tp

import matplotlib.pyplot as plt
import numpy as np
import pyvista as pv
from neuralset.extractors.neuro import FSAVERAGE_SIZES

from cortexlab.data.loader import get_hcp_roi_indices

from .base import BasePlotBrain
from .utils import (
    convert_ax_to_2d,
    get_cmap,
    get_scalar_mappable,
    robust_normalize,
    saturate_colors,
    tight_crop,
)

VIEW_DICT = {
    "ventral": ([0, 0, -1], [1, 0, 0]),
    "dorsal": ([0, 0, 1], [0, 1, 0]),
    "left": ([-1, 0, 0], [0, 0, 1]),
    "right": ([1, 0, 0], [0, 0, 1]),
    "anterior": ([0, 1, 0], [0, 0, -1]),
    "posterior": ([0, -1, 0], [0, 0, 1]),
    "medial_left": ([1, 0, 0], [0, 0, 1]),
    "medial_right": ([-1, 0, 0], [0, 0, 1]),
    "posterior_left": ([-1, 0, 0], [0, 0, 1]),
    "posterior_right": ([-1, 0, 0], [0, 0, 1]),
}


class PlotBrainPyvista(BasePlotBrain):

    dpi: int = 3000
    bg_darkness: float = 0
    ambient: float = 0.3
    w_pad: float = 0.03
    h_pad: float = 0.03

    VIEW_DICT: tp.ClassVar[dict] = VIEW_DICT

    def _convert_ax(self, ax):
        return convert_ax_to_2d(ax)

    def annotate_rois(
        self,
        pl: pv.Plotter,
        rois: str | list[str] | dict[str, str],
        hemi: str = "left",
        **kwargs,
    ):
        if isinstance(rois, str):
            rois = [rois]
        hemis = ["left", "right"] if hemi == "both" else [hemi]
        n = FSAVERAGE_SIZES[self.mesh]
        for h in hemis:
            verts = self._mesh[h]["coords"]
            for roi in rois:
                idx = get_hcp_roi_indices(roi, mesh=self.mesh, hemi=h)
                if h == "right":
                    idx = np.array(idx) - n
                center = verts[idx].mean(axis=0)
                name = rois[roi] if isinstance(rois, dict) else roi
                pl.add_point_labels(
                    center.reshape(1, 3),
                    [name],
                    shape_opacity=0,
                    **kwargs,
                )

    def plot_surf(
        self,
        data,
        axes,
        views="left",
        alpha_cmap=None,
        vmin: float | None = None,
        vmax: float | None = None,
        symmetric_cbar: bool = False,
        threshold: float | None = None,
        cmap: str = "hot",
        norm_percentile: float | None = None,
        annotated_rois: str | list[str] | dict | None = None,
        annotated_rois_kwargs: dict | None = None,
    ):
        if norm_percentile is not None:
            data = robust_normalize(data, percentile=norm_percentile)
        if isinstance(views, str):
            views = [views]
        views, axes = self.get_axarr_and_views(axes, views)
        cmap = get_cmap(cmap, alpha_cmap=alpha_cmap)
        sm = get_scalar_mappable(
            data,
            cmap,
            vmin=vmin,
            vmax=vmax,
            threshold=threshold,
            symmetric_cbar=symmetric_cbar,
        )

        stat_maps = self.get_stat_map(data)

        for ax, view in zip(axes, views):
            selected_hemi = (
                "left"
                if view in ["left", "medial_left"]
                else "right" if view in ["right", "medial_right"] else "both"
            )
            mesh = self._mesh[selected_hemi]
            vertices, faces = mesh["coords"], mesh["faces"]
            stat_map = stat_maps[selected_hemi]

            rgba = sm.to_rgba(stat_map)
            bg_map = mesh["bg_map"]
            bg_norm = (bg_map - bg_map.min()) / (bg_map.max() - bg_map.min() + 1e-8)
            bg_rgb = 1 - np.column_stack(
                [self.bg_darkness + bg_norm * (1 - self.bg_darkness)] * 3
            )
            colors = rgba[:, 3:4] * rgba[:, :3] + (1 - rgba[:, 3:4]) * bg_rgb

            pv_faces = np.column_stack([np.full(len(faces), 3), faces])

            ax_size = ax.get_position()
            pl = pv.Plotter(
                window_size=[
                    int(ax_size.width * self.dpi),
                    int(ax_size.height * self.dpi),
                ],
                off_screen=True,
            )

            surf = pv.PolyData(vertices, pv_faces)
            surf.point_data["colors"] = colors
            pl.add_mesh(
                surf,
                scalars="colors",
                rgb=True,
                smooth_shading=True,
                ambient=self.ambient,
            )

            pl.set_background("white")
            vec, up = VIEW_DICT[view]
            pl.view_vector(vec, viewup=up)
            if annotated_rois is not None:
                self.annotate_rois(
                    pl,
                    annotated_rois,
                    **(annotated_rois_kwargs or {}),
                )
            with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
                img = pl.screenshot(tmp.name, return_img=True)
            img = tight_crop(img, w_pad=self.w_pad, h_pad=self.h_pad)
            pl.clear()
            ax.axis("off")
            ax.imshow(img, aspect="equal")

        return sm

    def plot_surf_rgb(
        self,
        signals: tp.List[np.ndarray],
        alpha_signals: np.ndarray | None = None,
        norm_percentile=95,
        alpha_bg=0,
        cmap: tp.Literal["rgb", "rgb_argmax", "tab10"] = "rgb",
        saturation_factor: None | float = None,
        axes=None,
        views: list[str] = ["left"],
        bg_on_data=False,
    ):
        if isinstance(views, str):
            views = [views]
        views, axes = self.get_axarr_and_views(axes, views)

        if self.atlas_name is not None:
            signals = [self.atlas_to_surf(signal) for signal in signals]
        elif signals[0].ndim == 4:
            signals = [self.vol_to_surf(signal) for signal in signals]

        hemis = [self.get_hemis(signal) for signal in signals]
        if alpha_signals is not None:
            alpha_hemis = self.get_hemis(alpha_signals)

        data = dict()
        for selected_hemis in ("left", "right", "both"):
            stat_maps = [hemi[selected_hemis]["stat_map"] for hemi in hemis]
            colors = np.stack(stat_maps, axis=1)

            if cmap.startswith("rgb"):
                if len(signals) == 2:
                    colors = np.concatenate(
                        [colors, np.zeros((colors.shape[0], 1))], axis=1
                    )
                assert colors.shape[1] == 3
                if "argmax" in cmap:
                    colors = robust_normalize(colors, axis=1, percentile=100)
                    colors = (colors >= 1).astype(float)
                if norm_percentile is not None:
                    colors = robust_normalize(
                        colors, percentile=norm_percentile, two_sided=False
                    )
                if saturation_factor is not None:
                    colors = saturate_colors(colors, saturation_factor)
                colors = np.concatenate([colors, np.ones((colors.shape[0], 1))], axis=1)
            else:
                indices = np.argmax(colors, axis=1)
                cm = get_cmap(cmap)
                colors = cm(indices - 1)
                colors[indices == 0, :3] = 0

            if alpha_signals is not None:
                alpha = alpha_hemis[selected_hemis]["stat_map"]
                alpha_bg = 1 - alpha[:, None]

            bg = hemis[0][selected_hemis]["bg_map"]
            cmap_bg = plt.get_cmap("gray_r")
            bg = robust_normalize(bg, percentile=100)
            bg = cmap_bg(bg)
            if bg_on_data:
                colors[:, :3] = colors[:, :3] * bg[:, :3]
            else:
                colors[:, :3] = colors[:, :3] * (1 - alpha_bg) + bg[:, :3] * alpha_bg

            mesh = self._mesh[selected_hemis]
            data[selected_hemis] = dict(
                vertex_colors=colors,
                vertices=mesh["coords"],
                faces=mesh["faces"],
            )

        for ax, view in zip(axes, views):
            selected_hemis = (
                "left" if "left" in view else "right" if "right" in view else "both"
            )
            d = data[selected_hemis]

            pv_faces = np.column_stack([np.full(len(d["faces"]), 3), d["faces"]])

            ax_size = ax.get_position()
            pl = pv.Plotter(
                window_size=[
                    int(ax_size.width * self.dpi),
                    int(ax_size.height * self.dpi),
                ],
                off_screen=True,
            )

            surf = pv.PolyData(d["vertices"], pv_faces)
            surf.point_data["colors"] = d["vertex_colors"][:, :3]
            pl.add_mesh(
                surf,
                color="black",
                scalars="colors",
                rgb=True,
                smooth_shading=True,
                ambient=0.3,
            )

            vec, up = VIEW_DICT[view]
            pl.view_vector(vec, viewup=up)
            with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
                img = pl.screenshot(
                    tmp.name, return_img=True, transparent_background=True
                )
            img = tight_crop(img, w_pad=self.w_pad, h_pad=self.h_pad)
            pl.clear()
            ax.axis("off")
            ax.imshow(img, aspect="equal")

        return data["both"]["vertex_colors"]
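The PyVista backend renders each view off-screen and pastes the screenshot into a regular matplotlib axes. A sketch of its use, under the same assumed constructor keywords as above and assuming get_axarr_and_views (defined in BasePlotBrain, not shown here) accepts an array of matplotlib axes; a working off-screen VTK/PyVista setup is required:

import matplotlib.pyplot as plt
import numpy as np
from cortexlab.viz.cortical_pv import PlotBrainPyvista

plotter = PlotBrainPyvista(mesh="fsaverage5", atlas_name=None)  # hypothetical kwargs

fig, axes = plt.subplots(1, 2, figsize=(6, 3))
signals = np.random.rand(20484)  # fsaverage5, both hemispheres

sm = plotter.plot_surf(
    signals,
    axes=axes,
    views=["left", "right"],
    cmap="hot",
    norm_percentile=99,
)
fig.savefig("surface.png", dpi=200)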
src/cortexlab/viz/subcortical.py
ADDED
@@ -0,0 +1,311 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import copy
import tempfile
import typing as tp
from functools import lru_cache

import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import pyvista as pv
import seaborn as sns
from nilearn import datasets
from nilearn.surface import vol_to_surf
from scipy.ndimage import gaussian_filter
from skimage import measure

from cortexlab.viz.utils import (
    get_cmap,
    get_scalar_mappable,
    robust_normalize,
    tight_crop,
)


@lru_cache()
def get_subcortical_mask():
    atlas = datasets.fetch_atlas_harvard_oxford("sub-maxprob-thr50-2mm")
    excluded = ["Cortex", "White", "Stem", "Background"]
    selected_indices = [
        i
        for i, label in enumerate(atlas.labels)
        if any([exc.lower() in label.lower() for exc in excluded])
    ]
    mask_data = atlas.maps.get_fdata()
    mask_data[np.isin(mask_data, selected_indices)] = 0
    mask = nib.Nifti1Image(mask_data, atlas.maps.affine, atlas.maps.header)
    return mask


def get_subcortical_labels(with_hemi: bool = False):
    excluded = ["Cortex", "White", "Stem", "Background"]
    labels = [
        label
        for label in cached_ho_atlas().labels
        if not any([exc.lower() in label.lower() for exc in excluded])
    ]
    if not with_hemi:
        labels = list(
            set(
                [
                    label.replace("Left ", "")
                    for label in labels
                    if label.startswith("Left ")
                ]
            )
        )
    return labels


@lru_cache
def cached_ho_atlas(resolution: tp.Literal["1mm", "2mm"] = "1mm"):
    return datasets.fetch_atlas_harvard_oxford(f"sub-maxprob-thr50-{resolution}")


def get_subcortical_roi_indices(roi: str):
    subcortical_mask = copy.deepcopy(get_subcortical_mask())
    data = subcortical_mask.get_fdata()
    data = data[data > 0]
    ho_sub = cached_ho_atlas(resolution="2mm")
    labels = ho_sub.labels
    sel_labels = [label for label in labels if roi.lower() in label.lower()]
    assert sel_labels, f"ROI {roi} not found in atlas"
    sel_indices = [labels.index(label) for label in sel_labels]
    voxel_indices = np.where(np.isin(data, sel_indices))[0]
    return voxel_indices


def voxel_to_mesh(voxel_scores, label, resolution):
    subcortical_mask = copy.deepcopy(get_subcortical_mask())
    data = subcortical_mask.get_fdata()
    data[data > 0] = voxel_scores
    nii = nib.Nifti1Image(data, subcortical_mask.affine, subcortical_mask.header)
    roi_mask = get_mask(label, resolution)
    mesh = get_mesh(label, resolution)
    return nii_to_mesh(nii, mesh, mask_img=roi_mask)


def nii_to_mesh(nii, mesh, mask_img=None):
    vertices = mesh.points
    faces = mesh.faces.reshape(-1, 4)[:, 1:]
    vertex_vals = vol_to_surf(
        nii,
        surf_mesh=(vertices, faces),
        mask_img=mask_img,
        kind="line",
        depth=np.linspace(-3, 0, 40),
        interpolation="linear",
    )
    return vertex_vals


@lru_cache()
def get_mask(label: str, resolution: tp.Literal["1mm", "2mm"] = "1mm"):
    # fetch Harvard-Oxford subcortical atlas
    ho_sub = cached_ho_atlas(resolution=resolution)
    img = ho_sub.maps
    if label == "Cerebellum":
        raise NotImplementedError(
            "Cerebellum atlas (Diedrichsen 2009) is not yet supported. "
            "Provide the atlas path manually."
        )
        # unreachable placeholder kept from the original draft:
        # img = nib.load(file)
        # mask = img.get_fdata() > 0  # merge all lobules automatically
    elif label == "Brain-Stem":
        # subcortical, return hemisphere-specific mesh (default: right)
        idx = ho_sub.labels.index(label)
        mask = img.get_fdata() == idx
    else:
        if "Left" in label or "Right" in label:
            idx = ho_sub.labels.index(label)
            mask = img.get_fdata() == idx
        else:
            # merge left + right
            left_idx = ho_sub.labels.index("Left " + label)
            right_idx = ho_sub.labels.index("Right " + label)
            data = img.get_fdata()
            mask = (data == left_idx) | (data == right_idx)

    nii_mask = nib.Nifti1Image(mask.astype(float), img.affine, img.header)

    return nii_mask


@lru_cache()
def get_mesh(label: str, resolution: tp.Literal["1mm", "2mm"]):
    """
    Returns a PyVista mesh for a given label.
    For 'Cerebellum', 'Cerebral Cortex', and 'Brain-Stem', left and right hemispheres are joined.
    For other subcortical labels, returns separate left/right meshes.
    """

    if label == "Cerebral Cortex":
        fsaverage = datasets.fetch_surf_fsaverage("fsaverage7")
        nii = nib.load(fsaverage.pial_left)
        verts = nii.darrays[0].data
        faces = nii.darrays[1].data
        faces_pv = np.hstack([np.full((faces.shape[0], 1), 3), faces]).astype(np.int32)
        mesh = pv.PolyData(verts, faces_pv)
        return mesh

    nii_mask = get_mask(label, resolution)

    # smooth the mask slightly
    volume = gaussian_filter(nii_mask.get_fdata().astype(float), sigma=1)

    # marching cubes
    verts, faces, normals, values = measure.marching_cubes(volume, level=0.9)
    # Convert voxel coordinates to world/MNI coordinates
    affine = nii_mask.affine
    verts = nib.affines.apply_affine(affine, verts)

    # convert faces to PyVista format
    faces_pv = np.hstack([np.full((faces.shape[0], 1), 3), faces]).astype(np.int32)

    # create PyVista mesh
    mesh = pv.PolyData(verts, faces_pv)

    # smooth the mesh
    mesh = mesh.smooth(n_iter=50, relaxation_factor=0.01)

    return mesh


def plot_subcortical(
    ax,
    *,
    colors: dict = None,
    voxel_scores: np.ndarray = None,
    average_per_roi: bool = False,
    norm_percentile: int = None,
    show_cortex: bool = False,
    show_brain_stem: bool = False,
    show_cerebellum: bool = False,
    explode: float = 0.5,
    resolution: tp.Literal["1mm", "2mm"] = "1mm",
    show_scalar_bar: bool = False,
    zoom: float = 1.3,
    azimuth: float = 15,
    elevation: float = -10,
    intensity: float = 1.5,
    vmin: float | None = None,
    vmax: float | None = None,
    symmetric_cbar: bool = False,
    threshold: float | None = None,
    cmap: str = "hot",
    alpha_cmap: tuple[float, float] = None,
    **plot_kwargs,
):
    assert (colors is not None) ^ (
        voxel_scores is not None
    ), "Either colors or voxel_scores must be provided"
    labels = get_subcortical_labels(with_hemi=True)
    if colors is not None:
        assert isinstance(colors, dict), "Colors must be a dictionary"
    if voxel_scores is not None:
        assert voxel_scores.ndim in [1, 2], "voxel_scores must be a 1D or 2D array"
        if average_per_roi:
            for label in labels:
                indices = get_subcortical_roi_indices(label)
                voxel_scores[indices] = voxel_scores[indices].mean()
        if norm_percentile:
            voxel_scores = robust_normalize(voxel_scores, percentile=norm_percentile)
    if show_cerebellum:
        labels.append("Cerebellum")
    if show_cortex:
        labels.append("Cerebral Cortex")
    if show_brain_stem:
        labels.append("Brain-Stem")
    plotter = pv.Plotter(lighting="none")
    rgb = False
    cmap = get_cmap(cmap, alpha_cmap=alpha_cmap)
    sm = None
    if voxel_scores is not None:
        # a scalar mappable only makes sense when numeric scores are given
        sm = get_scalar_mappable(
            voxel_scores,
            cmap,
            vmin=vmin,
            vmax=vmax,
            threshold=threshold,
            symmetric_cbar=symmetric_cbar,
        )
    for label in labels:
        mesh = get_mesh(label, resolution)
        if label in ["Cerebral Cortex", "Brain-Stem"]:
            color = plt.cm.gray(0.8)
            scalars = None  # plain grey context mesh, no per-vertex scalars
        else:
            if colors is not None:
                color = colors[label]
                scalars = None
            else:
                assert voxel_scores is not None
                color = plt.cm.gray(0.8)
                if voxel_scores.ndim == 1:
                    scalars = voxel_to_mesh(voxel_scores, label, resolution)
                    scalars = sm.to_rgba(scalars)
                    rgb = True
                elif voxel_scores.ndim == 2:
                    assert voxel_scores.shape[0] == 3
                    scalars = np.stack(
                        [
                            voxel_to_mesh(voxel_scores, label, resolution)
                            for voxel_scores in voxel_scores
                        ],
                        axis=1,
                    )
                    rgb = True
        exploded_points = copy.deepcopy(mesh.points)
        if label == "Cerebral Cortex":
            exploded_points[:, 0] = (
                exploded_points[:, 0] + explode * exploded_points[:, 0].mean()
            )
        else:
            exploded_points[:, 2] = (
                exploded_points[:, 2] + explode * exploded_points.mean(axis=0)[2]
            )
        exploded_mesh = pv.PolyData(exploded_points, mesh.faces)
        plotter.add_mesh(
            exploded_mesh,
            color=color,
            scalars=scalars,
            rgb=rgb,
            show_scalar_bar=show_scalar_bar,
        )
    plotter.window_size = [300, 300]
    plotter.camera.zoom(zoom)
    plotter.camera.azimuth = azimuth
    plotter.camera.elevation = elevation
    light = pv.Light(intensity=intensity)
    light.set_headlight()
    plotter.add_light(light)

    with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
        img = plotter.screenshot(tmp.name, return_img=True)
    img = tight_crop(img)
    ax.imshow(img)
    ax.axis("off")
    return sm


if __name__ == "__main__":

    labels = get_subcortical_labels(with_hemi=False)
    palette = sns.color_palette("Set1", n_colors=len(labels))
    colors = {
        f"{hemi} {label}": palette[i]
        for i, label in enumerate(labels)
        for hemi in ["Left", "Right"]
    }
    fig, ax = plt.subplots(figsize=(4, 4))  # plot_subcortical draws into a matplotlib axes
    sm = plot_subcortical(
        ax,
        colors=colors,
        average_per_roi=True,
        cmap="fire",
        show_cerebellum=False,
        explode=1,
        resolution="1mm",
        zoom=1.3,
    )
    plt.show()
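A hedged usage sketch for plot_subcortical: voxel_scores must have one entry per voxel kept by get_subcortical_mask, so the sketch derives that count from the mask instead of hard-coding it (the Harvard-Oxford atlas is downloaded by nilearn on first use, and PyVista off-screen rendering is required):

import matplotlib.pyplot as plt
import numpy as np
from cortexlab.viz.subcortical import get_subcortical_mask, plot_subcortical

mask = get_subcortical_mask()
n_voxels = int((mask.get_fdata() > 0).sum())  # expected length of voxel_scores

fig, ax = plt.subplots(figsize=(4, 4))
scores = np.random.rand(n_voxels)  # dummy per-voxel scores

sm = plot_subcortical(
    ax,
    voxel_scores=scores,
    norm_percentile=99,
    show_brain_stem=True,
    cmap="hot",
)
fig.colorbar(sm, ax=ax, shrink=0.6)
plt.show()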
src/cortexlab/viz/utils.py
ADDED
@@ -0,0 +1,563 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import math
import re
from functools import reduce

import colorcet
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.colors import LinearSegmentedColormap


def robust_normalize(
    array, axis=None, percentile=99, clip=True, final_range=None, two_sided=True
):
    """Normalize the input array using statistics robust to outliers."""
    hi = np.percentile(array, percentile, axis=axis, keepdims=True)
    if two_sided:
        lo = np.percentile(array, 100 - percentile, axis=axis, keepdims=True)
    else:
        lo = np.min(array, axis=axis, keepdims=True)
    out = (array - lo) / (hi - lo)
    if clip:
        out = np.clip(out, 0, 1)
    if final_range is not None:
        if final_range == "original":
            final_range = (lo, hi)
        out = out * (final_range[1] - final_range[0]) + final_range[0]
    return out


def get_scalar_mappable(
    data,
    cmap,
    vmin=None,
    vmax=None,
    symmetric_cbar=False,
    threshold=None,
    alpha_cmap=None,
):
    vmin = vmin if vmin is not None else np.nanmin(data)
    vmax = vmax if vmax is not None else np.nanmax(data)
    if symmetric_cbar:
        vmin, vmax = -vmax, vmax
    sm = get_thresholded_sm(vmin, vmax, threshold=threshold, cmap=cmap)
    return sm


def get_thresholded_sm(vmin, vmax, threshold=None, cmap=None):

    if cmap is None:
        cmap = matplotlib.cm.get_cmap("hot")
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    cmaplist = [cmap(i) for i in range(cmap.N)]

    # set colors to gray for absolute values < threshold
    if threshold is not None:
        istart = int(norm(-threshold, clip=True) * (cmap.N - 1))
        istop = int(norm(threshold, clip=True) * (cmap.N - 1))
        for i in range(istart, istop):
            cmaplist[i] = (0.5, 0.5, 0.5, 1.0)
    our_cmap = LinearSegmentedColormap.from_list("Custom cmap", cmaplist, cmap.N)
    sm = plt.cm.ScalarMappable(cmap=our_cmap, norm=norm)

    # fake up the array of the scalar mappable.
    sm._A = []

    return sm


def get_pval_stars(pval: float):
    if pval < 0.0005:
        return "***"
    elif pval < 0.005:
        return "**"
    elif pval < 0.05:
        return "*"
    else:
        return ""


def saturate_colors(rgb: np.ndarray, factor: float):
    """
    rgb: tuple/list/array of (R, G, B) in 0-1 range
    factor: >1 boosts saturation, 1 leaves unchanged, 0 makes gray
    """
    rgb = np.array(rgb, dtype=float)

    # Compute luminance (perceptual gray)
    # Using Rec.709 coefficients for a fairly natural grayscale
    grayscale_coeffs = np.array([0.2126, 0.7152, 0.0722])
    if rgb.ndim == 1:
        lum = np.dot(grayscale_coeffs, rgb)
    elif rgb.ndim == 2:
        lum = np.dot(grayscale_coeffs, rgb.T)
        lum = lum[:, None].repeat(3, axis=1)
    else:
        raise ValueError(f"Invalid number of dimensions: {rgb.ndim}")

    # Pull or push the channels relative to gray
    new_rgb = lum + factor * (rgb - lum)

    # Clamp to 0–1
    new_rgb = np.clip(new_rgb, 0, 1)
    return new_rgb
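A quick illustration of why robust_normalize is used throughout the plotting code instead of plain min-max scaling: a single outlier dominates the min-max range, while the percentile-based version clips it and keeps the bulk of the distribution spread over [0, 1]:

import numpy as np
from cortexlab.viz.utils import robust_normalize

x = np.concatenate([np.random.randn(1000), [50.0]])  # one large outlier

minmax = (x - x.min()) / (x.max() - x.min())   # bulk squashed near 0 by the outlier
robust = robust_normalize(x, percentile=99)    # outlier clipped to 1

print(minmax[:-1].max(), robust[:-1].max())    # the robust bulk still spans up to ~1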
def get_alpha_cmap(cmap, threshold: float = 0, scale: float = 1, symmetric=False):
    """
    Takes a cmap and makes it transparent below a threshold.
    Transparency is linearly scaled between threshold and threshold + scale.
    """
    assert 0 <= threshold <= 1
    from matplotlib.colors import ListedColormap

    n_points = 1024
    new_cmap = cmap(np.linspace(0, 1, n_points))
    alpha = np.zeros_like(new_cmap[:, 3])
    # zeros before min, ramp 0 to 1 between min and max, 1 after max
    min_idx = int(threshold * (n_points - 1))
    max_idx = int((threshold + scale) * (n_points - 1))
    ramp = np.linspace(0, 1, max_idx - min_idx)
    alpha[min_idx : min(max_idx, n_points)] = ramp[: min(max_idx, n_points) - min_idx]
    alpha[min(max_idx, n_points) :] = 1
    # alpha[max_idx:] = 1
    if symmetric:
        alpha = np.concatenate([alpha[::-2], alpha[::2]])
    new_cmap[:, 3] = alpha
    new_cmap = ListedColormap(new_cmap)
    return new_cmap


def get_cmap(
    cmap_name: str | matplotlib.colors.Colormap,
    alpha_cmap: tuple[float, float] | None = None,
):
    if isinstance(cmap_name, str):
        cmap = (
            getattr(matplotlib.cm, cmap_name, None)
            or getattr(sns.cm, cmap_name, None)
            or getattr(colorcet.cm, cmap_name, None)
        )
    else:
        cmap = cmap_name
    if not cmap:
        raise ValueError(f"Invalid cmap: {cmap_name}")
    if alpha_cmap is not None:
        threshold, scale = alpha_cmap
        cmap = get_alpha_cmap(
            cmap,
            threshold=threshold,
            scale=scale,
            symmetric=(cmap_name in ["seismic", "bwr"]),
        )
    return cmap


def convert_ax_to_3d(ax):
    if hasattr(ax, "view_init"):
        return ax
    pos = ax.get_position()
    # subplotspec = ax.get_subplotspec()
    ax3d = ax.figure.add_axes(pos, projection="3d")
    # ax3d.set_position(pos)
    ax.remove()
    return ax3d


def convert_ax_to_2d(ax):
    pos = ax.get_position()
    ax2d = ax.figure.add_axes(pos)
    ax.remove()
    return ax2d


def lcm(a, b):
    return a * b // math.gcd(a, b) if a and b else max(a, b)


def _lcm_list(lst):
    return reduce(lcm, lst, 1)


def _repeat_chars(line, times):
    return "".join(c * times for c in line)


def _transpose(block):
    if not block:
        return []
    max_len = max(len(row) for row in block)
    block = [row.ljust(max_len) for row in block]
    return ["".join(block[r][c] for r in range(len(block))) for c in range(max_len)]


def _check_unique_letters(*blocks):
    """
    Ensure all blocks have unique letters across blocks.
    Raises an AssertionError if any letter appears in more than one block.
    """
    unique = set()
    for i, block in enumerate(blocks, 1):
        letters = set(block.replace("\n", ""))
        assert not (
            letters & unique
        ), f"Duplicate letters found in block {i}: {letters & unique}"
        unique.update(letters)


def _format_block(mosaic: str) -> str:
    return mosaic.replace(" ", "").lstrip("\n").rstrip("\n")


def combine_mosaics(*blocks, ratio=None, orient="v"):

    if len(blocks) < 2:
        raise ValueError("Need at least two blocks to combine")

    _check_unique_letters(*blocks)
    blocks = [_format_block(block) for block in blocks]

    # Normalize input
    blocks_lines = [block.split("\n") for block in blocks]

    # Normalize ratio
    if ratio is None:
        ratios = [1.0] * len(blocks_lines)
    else:
        try:
            ratios = list(ratio)
            if len(ratios) != len(blocks_lines):
                raise ValueError
        except Exception:
            ratios = [float(ratio)] * len(blocks_lines)

    # Transpose if horizontal
|
| 243 |
+
transposed = False
|
| 244 |
+
if orient == "v":
|
| 245 |
+
blocks_lines = [_transpose(b) for b in blocks_lines]
|
| 246 |
+
transposed = True
|
| 247 |
+
|
| 248 |
+
# Horizontal expansion (columns)
|
| 249 |
+
cols_list = [max(len(line) for line in b) if b else 0 for b in blocks_lines]
|
| 250 |
+
Lw = _lcm_list(cols_list)
|
| 251 |
+
blocks_expanded = []
|
| 252 |
+
for b, c, r in zip(blocks_lines, cols_list, ratios):
|
| 253 |
+
b = [line.ljust(c) for line in b]
|
| 254 |
+
h = max(1, int(round(Lw / c * r)))
|
| 255 |
+
blocks_expanded.append([_repeat_chars(line, h) for line in b])
|
| 256 |
+
|
| 257 |
+
# Vertical expansion (rows)
|
| 258 |
+
rows_list = [len(b) for b in blocks_expanded]
|
| 259 |
+
Lh = _lcm_list(rows_list)
|
| 260 |
+
blocks_tiled = []
|
| 261 |
+
for b, r in zip(blocks_expanded, ratios):
|
| 262 |
+
v = max(1, int(round(Lh / len(b))))
|
| 263 |
+
blocks_tiled.append([line for line in b for _ in range(v)])
|
| 264 |
+
|
| 265 |
+
# Combine all blocks
|
| 266 |
+
combined = ["".join(lines) for lines in zip(*blocks_tiled)]
|
| 267 |
+
|
| 268 |
+
# Transpose back if needed
|
| 269 |
+
if transposed:
|
| 270 |
+
combined = _transpose(combined)
|
| 271 |
+
|
| 272 |
+
return _format_block("\n".join(combined))
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+

def plot_colorbar(
    ax,
    sm=None,
    cmap=colorcet.cm.fire,
    vmin=0,
    vmax=1,
    label="R",
    label_orientation="vertical",
    orientation="vertical",
    **kwargs,
):

    # Hide the axis background, ticks, and spines
    ax.set_frame_on(False)
    ax.set_xticks([])
    ax.set_yticks([])

    # Create a ScalarMappable for the colorbar
    if sm is None:
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
        sm = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
        sm.set_array([])  # Required for colorbar

    # Draw the colorbar inside the given axis
    cbar = plt.colorbar(sm, cax=ax, orientation=orientation, **kwargs)

    # Set the label if provided
    if label is not None:
        rotation = 0 if label_orientation == "horizontal" else 90
        cbar.set_label(label, rotation=rotation, labelpad=5)

    # Add border by setting the color and linewidth of all spines
    rect = matplotlib.patches.Rectangle(
        (0, 0),
        1,
        1,
        transform=cbar.ax.transAxes,
        fill=False,
        edgecolor="k",
        linewidth=0.5,
        clip_on=False,
    )
    # cbar.ax.add_patch(rect)
    return cbar


def shrink_ax(ax, shrink=0.1, horizontally=True, vertically=True):
    pos = ax.get_position()
    # shrink from all sides
    horizontal_shrink = pos.width * shrink if horizontally else 0
    vertical_shrink = pos.height * shrink if vertically else 0
    new_pos = [
        pos.x0 + horizontal_shrink / 2,
        pos.y0 + vertical_shrink / 2,
        pos.width - horizontal_shrink,
        pos.height - vertical_shrink,
    ]
    ax.set_position(new_pos)


def move_ax(ax, x=0, y=0):
    pos = ax.get_position()
    up = y * pos.height
    right = x * pos.width
    new_pos = [
        pos.x0 + right,
        pos.y0 + up,
        pos.width,
        pos.height,
    ]
    ax.set_position(new_pos)


def label_ax(
    ax,
    label,
    x_offset=0,
    y_offset=0.03,
    fontsize=14,
    fontweight="bold",
    facecolor="none",
    edgecolor="none",
):
    pos = ax.get_position()
    fig = ax.get_figure()
    fig.text(
        pos.x0 + x_offset,
        pos.y1 + y_offset,
        label,
        fontsize=fontsize,
        fontweight=fontweight,
        ha="center",
        va="center",
    )


def set_title(axes, title, x_offset=0, y_offset=0, **kwargs):
    if not isinstance(axes, list):
        axes = [axes]
    centers = [(ax.get_position().x0 + ax.get_position().x1) / 2 for ax in axes]
    x = np.mean(centers)
    x = x + x_offset
    y = axes[0].get_position().y1 + y_offset
    fig = axes[0].get_figure()
    if "ha" not in kwargs:
        kwargs["ha"] = "center"
    if "va" not in kwargs:
        kwargs["va"] = "top"
    fig.text(x, y, title, **kwargs)


def tight_crop(img, bg_color=(255, 255, 255), tol=5, w_pad=0, h_pad=0):
    if img.shape[2] == 4:  # alpha channel exists
        alpha = img[..., 3]
        ys, xs = np.where(alpha > 0)
    else:
        bg = np.array(bg_color)
        mask = np.any(np.abs(img[..., :3] - bg) > tol, axis=2)
        ys, xs = np.where(mask)

    if len(xs) == 0:
        return img  # nothing found
    left, right, bottom, top = xs.min(), xs.max(), ys.min(), ys.max()
    w_pad = int(w_pad * (right - left))
    h_pad = int(h_pad * (top - bottom))
    left, bottom = max(0, left - w_pad), max(0, bottom - h_pad)
    right, top = min(img.shape[1], right + w_pad), min(img.shape[0], top + h_pad)

    return img[bottom : top + 1, left : right + 1]


def plot_rgb_colorbar(n_cubes=4, alpha=1, labels=["Text", "Audio", "Video"]):
    # Use a dark background to make the colors pop
    # plt.style.use('dark_background')
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111, projection="3d", proj_type="persp", focal_length=0.15)

    x = np.linspace(0, 1, n_cubes)
    y = np.linspace(0, 1, n_cubes)
    z = np.linspace(0, 1, n_cubes)
    X, Y, Z = np.meshgrid(x, y, z)
    X, Y, Z = np.ravel(X), np.ravel(Y), np.ravel(Z)
    colors = np.array([X, Y, Z]).T

    size = 0.2

    for i in range(len(X)):
        ax.bar3d(
            X[i] - size / 2,
            Y[i] - size / 2,
            Z[i] - size / 2,
            size,
            size,
            size,
            color=colors[i],
            alpha=alpha,
            edgecolor="none",
        )

    # --- AXIS ARROWS (QUIVERS) ---
    # We extend the arrows past the data (to 1.4) to show direction clearly
    arrow_props = dict(arrow_length_ratio=0.1, linewidth=1, pivot="tail")
    ax.quiver(0, 0, 0, 1.4, 0, 0, color="k", **arrow_props)
    ax.quiver(0, 0, 0, 0, 1.4, 0, color="k", **arrow_props)
    ax.quiver(0, 0, 0, 0, 0, 1.4, color="k", **arrow_props)

    # --- LABELS ---
    # Positioning labels at the tips of the arrows
    pos = 1.5
    ax.text(pos, 0, 0, labels[0], color="red", fontweight="bold", ha="center", va="top")
    ax.text(
        0, pos, 0, labels[1], color="green", fontweight="bold", ha="center", va="top"
    )
    ax.text(0, 0, pos, labels[2], color="blue", fontweight="bold", ha="center")

    # Remove all background clutter
    ax.set_axis_off()
    ax.set_facecolor((0, 0, 0, 0))  # Transparent pane

    # view_init: Azimuth -45 degrees keeps the origin cube (black) at the front
    # ax.view_init(elev=-40, azim=-135)
    ax.view_init(elev=45, azim=-135 + 180)
    ax.set_box_aspect(None, zoom=0.85)

    return fig


def get_rainbow_brain(mesh="fsaverage5", hemi="both"):
    import matplotlib.colors as mcolors
    from nilearn.datasets import fetch_surf_fsaverage
    from nilearn.surface import load_surf_mesh

    fsaverage = fetch_surf_fsaverage(mesh=mesh)
    sphere_l, _ = load_surf_mesh(fsaverage["sphere_left"])
    sphere_r, _ = load_surf_mesh(fsaverage["sphere_right"])
    if hemi == "both":
        coords = np.concatenate([sphere_l, sphere_r], axis=0)
    else:
        coords = sphere_l if hemi == "left" else sphere_r
    x, y, z = coords.T

    # SYMMETRY LOGIC:
    # On fsaverage, +x is Right, -x is Left.
    # To make them symmetric, we take the absolute value of X
    # or flip the X for the right hemisphere so that 'lateral' is always
    # the same direction relative to the color wheel.
    x_mapped = x if hemi == "left" else -x

    # Hue based on Longitude (using the corrected X)
    phi = np.arctan2(y, x_mapped)
    hues = (phi + np.pi) / (2 * np.pi)

    # Value based on Elevation (Z) to make it more distinct
    # (Optional: adds a slight brightness gradient from bottom to top)
    z_norm = (z - z.min()) / (z.max() - z.min() + 1e-8)
    vals = np.clip(0.8 + (z_norm * 0.3), 0, 1)

    hsv = np.stack([hues, np.ones_like(hues) * 0.9, vals], axis=1)
    return mcolors.hsv_to_rgb(hsv)


# ---------------------------------------------------------------------------
# Segment helpers (moved from analyses/utils.py)
# ---------------------------------------------------------------------------


def has_video(segment) -> bool:
    return any(e.__class__.__name__ == "Video" for e in segment.ns_events)


def has_audio(segment) -> bool:
    return any(e.__class__.__name__ == "Audio" for e in segment.ns_events)


def get_clip(segment, start_offset=0, stop_offset=0):
    from moviepy import VideoFileClip

    if not has_video(segment):
        return None
    video = [e for e in segment.ns_events if e.__class__.__name__ == "Video"][0]
    clip = VideoFileClip(video.filepath)
    true_start = video.start - video.offset
    clip = clip.subclipped(
        max(segment.start + start_offset - true_start, 0),
        min(segment.stop + stop_offset - true_start, clip.duration),
    )
    return clip


def get_audio(segment, start_offset=0, stop_offset=0):
    from moviepy import AudioFileClip

    if not has_audio(segment):
        return None
    audio = [e for e in segment.ns_events if e.__class__.__name__ == "Audio"][0]
    clip = AudioFileClip(audio.filepath)
    true_start = audio.start - audio.offset
    clip = clip.subclipped(
        max(segment.start + start_offset - true_start, 0),
        min(segment.stop + stop_offset - true_start, clip.duration),
    )
    return clip


def get_words(segment, filter=(0, 1), remove_punctuation=True, remove_stopwords=False):
    start, duration = segment.start, segment.duration
    clean = (
        (lambda x: re.sub(r"[^\w\s]", "", x)) if remove_punctuation else (lambda x: x)
    )
    words = [
        clean(e.text.lower())
        for e in segment.ns_events
        if e.__class__.__name__ == "Word"
        and filter[0] <= (e.start - start) / duration <= filter[1]
    ]
    if remove_stopwords:
        from stopwords import get_stopwords

        words = [w for w in words if w not in get_stopwords("english")]
    return words


def get_text(segment, **kwargs) -> str:
    return " ".join(get_words(segment, **kwargs))


if __name__ == "__main__":
    fig = plot_rgb_colorbar()
    plt.show()
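
A small usage sketch of the mosaic and colormap helpers defined above; `cortexlab.viz.utils` matches this file's location in the commit, but the exact output string and parameter choices here are illustrative, not taken from the repository.

from cortexlab.viz.utils import combine_mosaics, get_cmap

# Letter mosaics name axes, as for matplotlib.pyplot.subplot_mosaic
brain = "AAB\nAAC"
cbar = "D"
layout = combine_mosaics(brain, cbar)
# With these inputs and the default orient="v", this evaluates to
# "AAB\nAAC\nDDD\nDDD": both blocks are tiled onto a common grid so the
# result can be passed directly to plt.subplot_mosaic(layout).

# A colormap that is fully transparent below 20% of its range and ramps
# up to opaque over the next 30%
cmap = get_cmap("hot", alpha_cmap=(0.2, 0.3))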
tests/__init__.py
ADDED
File without changes

tests/conftest.py
ADDED
@@ -0,0 +1,8 @@
"""Shared test utilities."""

import neuralset.segments as seg


def make_segments(n):
    """Create n dummy segments for SegmentData construction."""
    return [seg.Segment(start=float(i), duration=1.0, timeline="test") for i in range(n)]
tests/test_attention.py
ADDED
@@ -0,0 +1,78 @@
"""Tests for ROI attention extraction."""

from unittest.mock import MagicMock

import numpy as np
import pytest
import torch


class TestAttentionExtractor:
    def test_context_manager_returns_list(self):
        from cortexlab.core.attention import AttentionExtractor

        encoder = torch.nn.TransformerEncoder(
            torch.nn.TransformerEncoderLayer(d_model=32, nhead=4, batch_first=True),
            num_layers=2,
        )
        with AttentionExtractor(encoder) as maps:
            x = torch.randn(2, 10, 32)
            _ = encoder(x)

        assert isinstance(maps, list)

    def test_hooks_cleaned_up(self):
        from cortexlab.core.attention import AttentionExtractor

        encoder = torch.nn.TransformerEncoder(
            torch.nn.TransformerEncoderLayer(d_model=32, nhead=4, batch_first=True),
            num_layers=2,
        )
        with AttentionExtractor(encoder) as _maps:
            x = torch.randn(1, 5, 32)
            _ = encoder(x)

        # Hooks should be cleaned up after exiting context
        assert isinstance(_maps, list)


class TestAttentionToRoiScores:
    def test_basic_roi_scores(self):
        from cortexlab.core.attention import attention_to_roi_scores

        # Simulate 2 layers of attention maps: (B=1, heads=4, T=10, T=10)
        attn_maps = [torch.randn(1, 4, 10, 10) for _ in range(2)]
        roi_indices = {
            "V1": np.array([0, 1, 2]),
            "MT": np.array([5, 6]),
        }
        scores = attention_to_roi_scores(attn_maps, roi_indices)

        assert "V1" in scores
        assert "MT" in scores
        assert scores["V1"].shape == (10,)
        assert scores["MT"].shape == (10,)

    def test_with_predictor_weights(self):
        from cortexlab.core.attention import attention_to_roi_scores

        attn_maps = [torch.randn(1, 4, 10, 10)]
        roi_indices = {
            "V1": np.array([0, 1]),
            "A1": np.array([3, 4]),
        }
        # Predictor weights: (hidden=32, n_vertices=10)
        weights = torch.randn(32, 10)
        scores = attention_to_roi_scores(attn_maps, roi_indices, predictor_weights=weights)

        assert "V1" in scores
        assert "A1" in scores

    def test_empty_attn_maps(self):
        from cortexlab.core.attention import attention_to_roi_scores

        roi_indices = {"V1": np.array([0, 1])}
        scores = attention_to_roi_scores([], roi_indices)

        assert "V1" in scores
        assert len(scores["V1"]) == 0
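
These tests pin down only the shape contract of `attention_to_roi_scores`: each ROI maps to one score per input token. A minimal sketch of one reduction that satisfies that contract (averaging attention over layers, heads, and query positions); this illustrates the expected shapes, not the library's actual weighting scheme.

import numpy as np
import torch

def roi_scores_sketch(attn_maps, roi_indices):
    """Illustrative reduction: mean attention received by each key token."""
    if not attn_maps:
        return {roi: np.array([]) for roi in roi_indices}
    # Stack to (layers, B, heads, T, T) and average everything but the key axis
    stacked = torch.stack(attn_maps)
    per_token = stacked.mean(dim=(0, 1, 2, 3))  # -> (T,)
    # Here every ROI shares the same per-token profile; the real function
    # presumably weights tokens by ROI-specific predictor weights.
    return {roi: per_token.numpy() for roi in roi_indices}

scores = roi_scores_sketch([torch.randn(1, 4, 10, 10)], {"V1": np.array([0, 1])})
assert scores["V1"].shape == (10,)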
tests/test_attribution.py
ADDED
@@ -0,0 +1,136 @@
"""Tests for modality attribution."""

import numpy as np
import pytest
import torch

from tests.conftest import make_segments


def _make_mock_model(n_vertices=100):
    """Create a mock model for attribution testing."""
    call_count = [0]

    def fake_forward(batch, **kwargs):
        call_count[0] += 1
        B = 2
        # Make text-ablated predictions differ more to simulate importance
        if torch.all(batch.data.get("text", torch.ones(1)) == 0):
            return torch.ones(B, n_vertices, 10) * 0.5
        elif torch.all(batch.data.get("audio", torch.ones(1)) == 0):
            return torch.ones(B, n_vertices, 10) * 0.8
        elif torch.all(batch.data.get("video", torch.ones(1)) == 0):
            return torch.ones(B, n_vertices, 10) * 0.9
        return torch.ones(B, n_vertices, 10) * 1.0

    # A plain class is simpler and more predictable than patching a MagicMock
    class FakeModel:
        feature_dims = {"text": (2, 32), "audio": (2, 32), "video": (2, 32)}

        def eval(self):
            pass

        def __call__(self, batch, **kwargs):
            return fake_forward(batch, **kwargs)

    return FakeModel()


class TestModalityAttributor:
    def test_ablation_basic(self):
        from neuralset.dataloader import SegmentData

        from cortexlab.inference.attribution import ModalityAttributor

        model = _make_mock_model()
        attributor = ModalityAttributor(model)

        data = {
            "text": torch.randn(2, 2, 32, 20),
            "audio": torch.randn(2, 2, 32, 20),
            "video": torch.randn(2, 2, 32, 20),
            "subject_id": torch.zeros(2, dtype=torch.long),
        }
        batch = SegmentData(data=data, segments=make_segments(2))
        scores = attributor.attribute(batch)

        assert "text" in scores
        assert "audio" in scores
        assert "video" in scores
        assert scores["text"].shape == (100,)

    def test_text_most_important(self):
        from neuralset.dataloader import SegmentData

        from cortexlab.inference.attribution import ModalityAttributor

        model = _make_mock_model()
        attributor = ModalityAttributor(model)

        data = {
            "text": torch.randn(2, 2, 32, 20),
            "audio": torch.randn(2, 2, 32, 20),
            "video": torch.randn(2, 2, 32, 20),
            "subject_id": torch.zeros(2, dtype=torch.long),
        }
        batch = SegmentData(data=data, segments=make_segments(2))
        scores = attributor.attribute(batch)

        # Text ablation causes the biggest change (1.0 -> 0.5 = 0.5 diff)
        assert scores["text"].mean() > scores["audio"].mean()
        assert scores["audio"].mean() > scores["video"].mean()

    def test_normalised_scores_sum_to_one(self):
        from neuralset.dataloader import SegmentData

        from cortexlab.inference.attribution import ModalityAttributor

        model = _make_mock_model()
        attributor = ModalityAttributor(model)

        data = {
            "text": torch.randn(2, 2, 32, 20),
            "audio": torch.randn(2, 2, 32, 20),
            "video": torch.randn(2, 2, 32, 20),
            "subject_id": torch.zeros(2, dtype=torch.long),
        }
        batch = SegmentData(data=data, segments=make_segments(2))
        scores = attributor.attribute(batch)

        total = scores["text_normalised"] + scores["audio_normalised"] + scores["video_normalised"]
        np.testing.assert_allclose(total, 1.0, atol=1e-6)

    def test_with_roi_indices(self):
        from neuralset.dataloader import SegmentData

        from cortexlab.inference.attribution import ModalityAttributor

        roi_indices = {
            "V1": np.array([0, 1, 2, 3, 4]),
            "MT": np.array([10, 11, 12]),
        }
        model = _make_mock_model()
        attributor = ModalityAttributor(model, roi_indices=roi_indices)

        data = {
            "text": torch.randn(2, 2, 32, 20),
            "audio": torch.randn(2, 2, 32, 20),
            "video": torch.randn(2, 2, 32, 20),
            "subject_id": torch.zeros(2, dtype=torch.long),
        }
        batch = SegmentData(data=data, segments=make_segments(2))
        scores = attributor.attribute(batch)

        assert "text_roi" in scores
        assert "V1" in scores["text_roi"]
        assert "MT" in scores["text_roi"]
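
The mock above encodes the idea these tests exercise: ablate one modality at a time and measure how far the predictions move. A standalone sketch of that ablation loop, assuming only that the model is callable on a SegmentData-like batch with a `data` dict; the helper name and the exact normalisation are illustrative, not the library's implementation.

import torch

def ablation_attribution_sketch(model, batch, modalities=("text", "audio", "video")):
    """Zero one modality at a time; larger prediction shifts mean more importance."""
    full = model(batch)  # (B, n_vertices, T)
    scores = {}
    for name in modalities:
        ablated_data = dict(batch.data)
        ablated_data[name] = torch.zeros_like(batch.data[name])
        ablated_batch = batch.__class__(data=ablated_data, segments=batch.segments)
        pred = model(ablated_batch)
        # Mean absolute change per vertex, averaged over batch and time
        scores[name] = (full - pred).abs().mean(dim=(0, 2))
    total = sum(s.mean() for s in scores.values())
    normalised = {f"{k}_normalised": v.mean() / total for k, v in scores.items()}
    return {**scores, **normalised}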
tests/test_brain_alignment.py
ADDED
@@ -0,0 +1,91 @@
"""Tests for the brain-alignment benchmark."""

import numpy as np
import pytest


class TestBrainAlignmentBenchmark:
    def _make_data(self, n_stimuli=20, model_dim=64, n_vertices=100):
        model_features = np.random.randn(n_stimuli, model_dim)
        brain_predictions = np.random.randn(n_stimuli, n_vertices)
        return model_features, brain_predictions

    def test_rsa_returns_score(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat, brain_pred = self._make_data()
        bench = BrainAlignmentBenchmark(brain_pred)
        result = bench.score_model(model_feat, method="rsa")
        assert -1.0 <= result.aggregate_score <= 1.0
        assert result.method == "rsa"
        assert result.n_stimuli == 20

    def test_cka_returns_score(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat, brain_pred = self._make_data()
        bench = BrainAlignmentBenchmark(brain_pred)
        result = bench.score_model(model_feat, method="cka")
        assert isinstance(result.aggregate_score, float)
        assert result.method == "cka"

    def test_procrustes_returns_score(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat, brain_pred = self._make_data()
        bench = BrainAlignmentBenchmark(brain_pred)
        result = bench.score_model(model_feat, method="procrustes")
        assert isinstance(result.aggregate_score, float)

    def test_identical_features_high_score(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        data = np.random.randn(30, 50)
        bench = BrainAlignmentBenchmark(data)
        result = bench.score_model(data, method="cka")
        assert result.aggregate_score > 0.95

    def test_roi_scores(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat, brain_pred = self._make_data()
        roi_indices = {
            "V1": np.array([0, 1, 2, 3, 4]),
            "MT": np.array([10, 11, 12, 13, 14]),
        }
        bench = BrainAlignmentBenchmark(brain_pred, roi_indices=roi_indices)
        result = bench.score_model(model_feat, method="rsa")
        assert "V1" in result.roi_scores
        assert "MT" in result.roi_scores

    def test_roi_filter(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat, brain_pred = self._make_data()
        roi_indices = {
            "V1": np.array([0, 1, 2]),
            "MT": np.array([10, 11]),
            "A1": np.array([20, 21]),
        }
        bench = BrainAlignmentBenchmark(brain_pred, roi_indices=roi_indices)
        result = bench.score_model(model_feat, method="rsa", roi_filter=["V1", "A1"])
        assert "V1" in result.roi_scores
        assert "A1" in result.roi_scores
        assert "MT" not in result.roi_scores

    def test_stimulus_count_mismatch_raises(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat = np.random.randn(10, 32)
        brain_pred = np.random.randn(20, 100)
        bench = BrainAlignmentBenchmark(brain_pred)
        with pytest.raises(ValueError, match="Stimulus count mismatch"):
            bench.score_model(model_feat, method="rsa")

    def test_unknown_method_raises(self):
        from cortexlab.analysis.brain_alignment import BrainAlignmentBenchmark

        model_feat, brain_pred = self._make_data()
        bench = BrainAlignmentBenchmark(brain_pred)
        with pytest.raises(ValueError, match="Unknown method"):
            bench.score_model(model_feat, method="banana")
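
For readers unfamiliar with the "rsa" method exercised above, this is a minimal, self-contained sketch of representational similarity analysis: correlate the two stimulus-by-stimulus representational dissimilarity matrices. It illustrates the metric (and assumes scipy is available); the library's exact distance and correlation choices may differ.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr

def rsa_score_sketch(model_features, brain_predictions):
    """Spearman correlation between the two condensed RDMs."""
    rdm_model = pdist(model_features, metric="correlation")
    rdm_brain = pdist(brain_predictions, metric="correlation")
    rho, _ = spearmanr(rdm_model, rdm_brain)
    return rho

# Identical inputs give identical RDMs and therefore a perfect score
x = np.random.randn(30, 50)
assert rsa_score_sketch(x, x) > 0.999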
tests/test_cognitive_load.py
ADDED
@@ -0,0 +1,100 @@
"""Tests for the cognitive load scorer."""

import numpy as np
import pytest


def _make_roi_indices():
    """Create minimal ROI indices for testing."""
    return {
        # Executive
        "46": np.array([0, 1]),
        "FEF": np.array([2, 3]),
        "p32pr": np.array([4]),
        # Visual
        "V1": np.array([10, 11, 12]),
        "V2": np.array([13, 14]),
        "MT": np.array([15, 16]),
        # Auditory
        "A1": np.array([20, 21, 22]),
        "LBelt": np.array([23]),
        # Language
        "44": np.array([30, 31]),
        "45": np.array([32, 33]),
        "TPOJ1": np.array([34]),
    }


class TestCognitiveLoadScorer:
    def test_basic_scoring(self):
        from cortexlab.analysis.cognitive_load import CognitiveLoadScorer

        roi_indices = _make_roi_indices()
        scorer = CognitiveLoadScorer(roi_indices)

        # Create predictions with 50 vertices, 10 timepoints
        predictions = np.random.randn(10, 50)
        result = scorer.score_predictions(predictions)

        assert 0.0 <= result.overall_load <= 1.0
        assert 0.0 <= result.visual_complexity <= 1.0
        assert 0.0 <= result.auditory_demand <= 1.0
        assert 0.0 <= result.language_processing <= 1.0
        assert 0.0 <= result.executive_load <= 1.0

    def test_timeline_length(self):
        from cortexlab.analysis.cognitive_load import CognitiveLoadScorer

        roi_indices = _make_roi_indices()
        scorer = CognitiveLoadScorer(roi_indices)

        predictions = np.random.randn(15, 50)
        result = scorer.score_predictions(predictions, tr_seconds=1.5)

        assert len(result.timeline) == 15
        # Check timeline timestamps
        assert result.timeline[0][0] == 0.0
        assert abs(result.timeline[1][0] - 1.5) < 1e-6

    def test_single_timepoint(self):
        from cortexlab.analysis.cognitive_load import CognitiveLoadScorer

        roi_indices = _make_roi_indices()
        scorer = CognitiveLoadScorer(roi_indices)

        predictions = np.random.randn(50)  # 1D input
        result = scorer.score_predictions(predictions)

        assert len(result.timeline) == 1
        assert 0.0 <= result.overall_load <= 1.0

    def test_high_visual_activation(self):
        from cortexlab.analysis.cognitive_load import CognitiveLoadScorer

        roi_indices = _make_roi_indices()
        scorer = CognitiveLoadScorer(roi_indices, baseline_activation=0.1)

        # Create predictions with very high activation in visual ROIs
        predictions = np.zeros((5, 50))
        for roi in ["V1", "V2", "MT"]:
            predictions[:, roi_indices[roi]] = 10.0

        result = scorer.score_predictions(predictions)
        # Visual complexity should be highest (capped at 1.0 due to normalization)
        assert result.visual_complexity > 0.5

    def test_custom_cognitive_map(self):
        from cortexlab.analysis.cognitive_load import CognitiveLoadScorer

        roi_indices = {"V1": np.array([0, 1, 2]), "A1": np.array([3, 4])}
        custom_map = {
            "visual_complexity": ["V1"],
            "auditory_demand": ["A1"],
        }
        scorer = CognitiveLoadScorer(roi_indices, cognitive_map=custom_map)

        predictions = np.random.randn(5, 10)
        result = scorer.score_predictions(predictions)

        assert result.executive_load == 0.0  # Not in custom map
        assert result.language_processing == 0.0
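
The assertions above imply that the scorer groups ROIs into cognitive systems, averages each system's vertex predictions, and squashes the result into [0, 1]. A hedged sketch of that reduction; the mapping, baseline handling, and normalisation are illustrative and may differ from the library's implementation.

import numpy as np

COGNITIVE_MAP = {
    "visual_complexity": ["V1", "V2", "MT"],
    "auditory_demand": ["A1", "LBelt"],
}

def system_loads_sketch(predictions, roi_indices, baseline=0.1):
    """Mean ROI activation per cognitive system, clipped into [0, 1]."""
    predictions = np.atleast_2d(predictions)  # (T, n_vertices)
    loads = {}
    for system, rois in COGNITIVE_MAP.items():
        idx = np.concatenate([roi_indices[r] for r in rois])
        activation = predictions[:, idx].mean() - baseline
        loads[system] = float(np.clip(activation, 0.0, 1.0))
    return loads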
tests/test_model.py
ADDED
@@ -0,0 +1,111 @@
"""Tests for the core FmriEncoder model."""

from unittest.mock import MagicMock

import pytest
import torch


def _make_model(hidden=256, n_outputs=100, n_timesteps=10, modalities=None):
    """Build a small FmriEncoderModel for testing."""
    from neuraltrain.models.transformer import TransformerEncoder

    from cortexlab.core.model import FmriEncoder

    if modalities is None:
        modalities = {"text": (2, 32), "audio": (2, 32), "video": (2, 32)}

    config = FmriEncoder(
        hidden=hidden,
        max_seq_len=128,
        dropout=0.0,
        modality_dropout=0.0,
        temporal_dropout=0.0,
        linear_baseline=False,
        encoder=TransformerEncoder(depth=2, heads=4),
    )
    model = config.build(
        feature_dims=modalities,
        n_outputs=n_outputs,
        n_output_timesteps=n_timesteps,
    )
    return model


def _make_segments(n):
    """Create dummy segments for SegmentData."""
    import neuralset.segments as seg
    return [seg.Segment(start=float(i), duration=1.0, timeline="test") for i in range(n)]


def _make_batch(modalities, batch_size=2, seq_len=20):
    """Create a synthetic SegmentData-like batch."""
    from neuralset.dataloader import SegmentData

    data = {}
    for name, (n_layers, feat_dim) in modalities.items():
        data[name] = torch.randn(batch_size, n_layers, feat_dim, seq_len)
    data["subject_id"] = torch.zeros(batch_size, dtype=torch.long)
    return SegmentData(data=data, segments=_make_segments(batch_size))


class TestFmriEncoderModel:
    def test_forward_shape(self):
        modalities = {"text": (2, 32), "audio": (2, 32)}
        model = _make_model(modalities=modalities)
        batch = _make_batch(modalities)
        out = model(batch)
        assert out.shape == (2, 100, 10), f"Expected (2, 100, 10), got {out.shape}"

    def test_forward_no_pool(self):
        modalities = {"text": (2, 32)}
        model = _make_model(modalities=modalities)
        batch = _make_batch(modalities)
        out = model(batch, pool_outputs=False)
        assert out.shape[0] == 2
        assert out.shape[1] == 100

    def test_return_attn(self):
        modalities = {"text": (2, 32)}
        model = _make_model(modalities=modalities)
        batch = _make_batch(modalities)
        result = model(batch, return_attn=True)
        assert isinstance(result, tuple)
        out, attn_maps = result
        assert out.shape == (2, 100, 10)
        # attn_maps may be empty if the transformer doesn't expose weights
        assert isinstance(attn_maps, list)

    def test_missing_modality_zeros(self):
        modalities = {"text": (2, 32), "audio": (2, 32)}
        model = _make_model(modalities=modalities)
        # Only provide text, not audio
        from neuralset.dataloader import SegmentData
        data = {"text": torch.randn(2, 2, 32, 20), "subject_id": torch.zeros(2, dtype=torch.long)}
        batch = SegmentData(data=data, segments=_make_segments(2))
        out = model(batch)
        assert out.shape == (2, 100, 10)

    def test_modality_dropout_training(self):
        modalities = {"text": (2, 32), "audio": (2, 32)}
        from neuraltrain.models.transformer import TransformerEncoder

        from cortexlab.core.model import FmriEncoder
        config = FmriEncoder(
            hidden=256, max_seq_len=128, modality_dropout=0.5,
            encoder=TransformerEncoder(depth=2, heads=4),
        )
        model = config.build(feature_dims=modalities, n_outputs=100, n_output_timesteps=10)
        model.train()
        batch = _make_batch(modalities)
        out = model(batch)
        assert out.shape == (2, 100, 10)

    def test_linear_baseline(self):
        modalities = {"text": (2, 32)}
        from cortexlab.core.model import FmriEncoder
        config = FmriEncoder(hidden=256, linear_baseline=True)
        model = config.build(feature_dims=modalities, n_outputs=100, n_output_timesteps=10)
        batch = _make_batch(modalities)
        out = model(batch)
        assert out.shape == (2, 100, 10)
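
A short note on exercising this suite: assuming the package and its test dependencies (pytest, torch, numpy) are installed, the whole suite can be run either from the command line or through pytest's Python API, as sketched below.

import pytest

# Equivalent to running `pytest tests/ -q` from the repository root
raise SystemExit(pytest.main(["tests/", "-q"]))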