chongjg committed on
Commit
c9aa2cf
·
verified ·
1 Parent(s): 38c5595

upload processed fMRI (CC2017)

Browse files
CC2017-processed/all-test-allsub-fmri.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c45a6f47b768a28463429be1d91bb9c5f5b1eaa1154727a0f2c585d519bfbf6
3
+ size 947650688
CC2017-processed/all-train-allsub-fmri.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a9241586ef305228bbace0f145b3180f142c1ef2c9a1c940e4c850a135f6314
3
+ size 3411542144
CC2017-processed/dataset_cc2017.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import random
4
+ import decord
5
+ decord.bridge.set_bridge('torch')
6
+
7
+ import numpy as np
8
+
9
+ from einops import rearrange
10
+
class CC2017_fmri_video_dataset(torch.utils.data.Dataset):
    """Paired fMRI / video-clip dataset for the CC2017 stimulus set.

    Each item couples a short video clip (and a frame-level text prompt)
    with a window of preprocessed fMRI responses from one or more subjects.
    Returned dict keys: 'text_prompt', 'pixel_values' (the video tensor),
    and 'image' (the fMRI window).

    NOTE(review): file layout expected under `path`:
      - stimuli/text_{phase}_256_3hz.npy   (frame-level prompts)
      - stimuli/{seg|test}{N}.mp4          (video segments)
      - all-{phase}-allsub-fmri.npy        (fMRI array)
    Exact array shapes are not visible here — presumably
    fmri_data is (subjects, segments, records, time, voxels); confirm
    against the preprocessing script.
    """

    def __init__(self,
                 path,
                 image_size=(256, 256),
                 time_sbj=240,
                 lag=3,
                 window_size=2,
                 phase='train',

                 width: int = 256,
                 height: int = 256,
                 n_sample_frames: int = 6,
                 sample_frame_rate: int = 10,

                 # data_aug: bool = False,

                 stage: str = 'fmri-video',

                 load_to_memory = False,
                 subjects = [0,1,2],  # NOTE(review): mutable default argument; never mutated here, but consider a tuple/None default
                 ):
        """Load prompts, video segments, and fMRI data for `phase`.

        Args:
            path: root directory of the processed CC2017 data (see class docstring).
            image_size: (H, W) stored for reference; not applied here (augmentation is commented out).
            time_sbj: number of time samples per segment used for indexing.
            lag: stored only; presumably hemodynamic lag in TRs — TODO confirm (unused in this file).
            window_size: number of consecutive fMRI time points returned per item.
            phase: 'train' (18 segments, files 'segN.mp4') or 'test' (5 segments, files 'testN.mp4').
            width / height: decode resolution passed to decord.VideoReader.
            n_sample_frames: frames sampled per time point.
            sample_frame_rate: stride (in decoded frames) between sampled frames.
            stage: 'fmri-video' (clip target) or 'fmri-text' (single-frame target in [0, 1]).
            load_to_memory: if True, pre-decode all clips into one float16 array in [-1, 1].
            subjects: indices into the first axis of the fMRI array.
        """
        super().__init__()
        assert phase in ['train', 'test']
        # if data_aug:
        #     assert phase == 'train'

        # Total fMRI frames per record — stored for reference; not read in this file.
        self.fmri_frames = 246
        self.subjects = subjects
        if phase == 'train':
            self.segments = 18
            self.records = 2
            self.str = 'seg'   # video filename prefix for training segments
        else:
            self.segments = 5
            self.records = 10
            self.str = 'test'  # video filename prefix for test segments
        self.time_per_sbj = time_sbj
        # Dataset length iterates segments x time only; the records factor is
        # deliberately excluded (self.records is forced to 1 below).
        self.len = 1 * self.segments * self.time_per_sbj # * self.records
        self.phase = phase
        self.stage = stage
        # self.data_aug = data_aug
        # if self.data_aug:
        #     print('using data augmentation.')
        #     self.image_transform = transforms.Compose([
        #         random_crop(round(image_size[0] * 0.8), p=0.5),
        #         transforms.Resize(image_size),
        #     ])

        print('start load ' + phase + ' dataset\nstage : ' + self.stage)

        #################### prompt #####################

        # Frame-level text prompts; indexed later as [segid, timid, frame_id].
        self.frame_level_prompt = np.load(os.path.join(path, 'stimuli/text_%s_256_3hz.npy' % phase))
        print('prompt load finished.')

        #################### video #####################

        self.width = width
        self.height = height
        self.n_sample_frames = n_sample_frames
        self.sample_frame_rate = sample_frame_rate
        self.video_data = []
        self.load_to_memory = load_to_memory
        if self.load_to_memory:
            # Pre-decode every clip of every segment into memory.
            for segid in range(1, self.segments+1):
                print(f'video loading ... {segid}/{self.segments}')
                self.video_data.append([])
                video = decord.VideoReader(os.path.join(path, 'stimuli/%s%d.mp4' % (self.str, segid)), width=self.width, height=self.height)
                for sampleid in range(self.time_per_sbj):
                    # n_sample_frames frames per time point, spaced sample_frame_rate apart.
                    sample_index = [int((sampleid * n_sample_frames + i) * sample_frame_rate) for i in range(n_sample_frames)]
                    sample = video.get_batch(sample_index)
                    # decord returns (frames, H, W, C); convert to (frames, C, H, W).
                    sample = rearrange(sample, "f h w c -> f c h w")
                    self.video_data[-1].append(sample.numpy())
                del video  # release the decoder before the next segment
            # Normalize uint8 [0, 255] -> float16 [-1, 1] once, up front.
            self.video_data = (np.array(self.video_data) / 127.5 - 1.0).astype(np.float16)
        else:
            # Keep one lazy VideoReader per segment; decode on demand in __getitem__.
            for segid in range(1, self.segments+1):
                video = decord.VideoReader(os.path.join(path, 'stimuli/%s%d.mp4' % (self.str, segid)), width=self.width, height=self.height)
                self.video_data.append(video)

        print('video load finished.')

        #################### fmri #####################

        self.image_size = image_size # (H, W)
        self.lag = lag
        self.window_size = window_size

        self.path = path

        self.fmri_data = np.load(os.path.join(path, f'./all-{phase}-allsub-fmri.npy'))
        print('fmri load finished.')

        # Collapse the records dimension for indexing: getIndex() will always
        # yield recid == 0, and len excludes records (see self.len above).
        self.records = 1

        # Zeroed template with the shape of one subject's fMRI window
        # (obtained by fetching item 0 and multiplying by 0.0).
        self.aver_fmri = 0.0 * self.__getitem__(0)['image'][0]

    def __len__(self):
        # segments * time_per_sbj (records excluded; see __init__).
        return self.len

    def getIndex(self, index):
        """Decompose a flat index into (subid, segid, recid, timid, promptid).

        Mixed-radix decode over (segments, records, time_per_sbj); with
        self.records forced to 1, recid is always 0 and subid is always 0
        for indices < len(self). promptid flattens (segid, timid) for the
        prompt array.
        """
        subid = index // (self.segments * self.records * self.time_per_sbj)
        index %= (self.segments * self.records * self.time_per_sbj)
        segid = index // (self.records * self.time_per_sbj)
        index %= (self.records * self.time_per_sbj)
        recid = index // self.time_per_sbj
        index %= self.time_per_sbj
        timid = index
        promptid = segid * self.time_per_sbj + timid
        return subid, segid, recid, timid, promptid

    def __getitem__(self, index):
        """Return {'text_prompt', 'pixel_values', 'image'} for `index`.

        pixel_values: for stage 'fmri-video', a clip of n_sample_frames frames
        in [-1, 1]; for stage 'fmri-text', a single randomly chosen frame
        rescaled to [0, 1]. image: fMRI window of window_size time points for
        the selected subjects.

        NOTE(review): frame selection uses `random` without a seeded state, so
        items are non-deterministic across calls.
        """
        subid, segid, recid, timid, promptid = self.getIndex(index)

        # Pick one frame of the clip at random; used for the prompt and,
        # in 'fmri-text' stage, as the single image target.
        frame_id = random.randint(0, self.n_sample_frames-1)
        text_prompt = self.frame_level_prompt[segid, timid, frame_id]

        sample_index = [int((timid * self.n_sample_frames + i) * self.sample_frame_rate) for i in range(self.n_sample_frames)]
        if self.stage == 'fmri-text':
            # Only decode the one chosen frame in the lazy path.
            sample_index = [sample_index[frame_id]]

        if self.load_to_memory:
            # Pre-decoded array is already normalized to [-1, 1] (float16).
            video = self.video_data[segid, timid]
            video = torch.from_numpy(video)
        else:
            video = self.video_data[segid].get_batch(sample_index)
            video = rearrange(video, "f h w c -> f c h w")
            # Normalize uint8 [0, 255] -> [-1, 1] (in-memory path did this in __init__).
            video = video / 127.5 - 1.0

        if self.stage == 'fmri-text':
            if self.load_to_memory:
                # Memory path holds all frames; select the chosen one.
                video = video[frame_id]
            else:
                # Lazy path decoded only the chosen frame.
                video = video[0]
            # rescale [-1, 1] to [0, 1]
            video = (video + 1) / 2

        # Window of window_size consecutive time points for the selected subjects.
        # NOTE(review): recid is always 0 after __init__ sets self.records = 1.
        fmri = self.fmri_data[self.subjects][:,segid, recid, timid : timid + self.window_size]
        fmri = torch.from_numpy(fmri)[:, :, None]  # add a singleton channel axis

        # if self.data_aug:
        #     video = self.image_transform(video)
        #     text_prompt = self.text_transform(text_prompt)

        item = {'text_prompt': text_prompt, 'pixel_values': video, 'image': fmri}
        return item
CC2017-processed/stimuli/text_test_256_3hz.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeb8af4eb85e7d2c8f75bb957335d7bb370c1542dff8e5b338dd8fc44b1d1ee3
3
+ size 2505728
CC2017-processed/stimuli/text_train_256_3hz.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d932466ac9251013def65d029a0443b55c9a114a6d9e4866fd1de38a7cf8ee3
3
+ size 13789568
CC2017-processed/vc_roi.npz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc5a6ff1889ccd128ee3a703cb49c64a805d6719cc8f0cef303334d1df3323ad
3
+ size 30589