{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"minmax normalization\n",
"all files in dataset: 180\n"
]
},
{
"ename": "RuntimeError",
"evalue": "applying transform <monai.transforms.compose.Compose object at 0x000001F80C2EC580>",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:103\u001b[0m, in \u001b[0;36mapply_transform\u001b[1;34m(transform, data, map_items, unpack_items, log_stats)\u001b[0m\n\u001b[0;32m 102\u001b[0m \u001b[39mreturn\u001b[39;00m [_apply_transform(transform, item, unpack_items) \u001b[39mfor\u001b[39;00m item \u001b[39min\u001b[39;00m data]\n\u001b[1;32m--> 103\u001b[0m \u001b[39mreturn\u001b[39;00m _apply_transform(transform, data, unpack_items)\n\u001b[0;32m 104\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m \u001b[39mas\u001b[39;00m e:\n\u001b[0;32m 105\u001b[0m \u001b[39m# if in debug mode, don't swallow exception so that the breakpoint\u001b[39;00m\n\u001b[0;32m 106\u001b[0m \u001b[39m# appears where the exception was raised.\u001b[39;00m\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:67\u001b[0m, in \u001b[0;36m_apply_transform\u001b[1;34m(transform, parameters, unpack_parameters)\u001b[0m\n\u001b[0;32m 65\u001b[0m \u001b[39mreturn\u001b[39;00m transform(\u001b[39m*\u001b[39mparameters)\n\u001b[1;32m---> 67\u001b[0m \u001b[39mreturn\u001b[39;00m transform(parameters)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\intensity\\dictionary.py:569\u001b[0m, in \u001b[0;36mScaleIntensityd.__call__\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 568\u001b[0m \u001b[39mfor\u001b[39;00m key \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mkey_iterator(d):\n\u001b[1;32m--> 569\u001b[0m d[key] \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mscaler(d[key])\n\u001b[0;32m 570\u001b[0m \u001b[39mreturn\u001b[39;00m d\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\intensity\\array.py:463\u001b[0m, in \u001b[0;36mScaleIntensity.__call__\u001b[1;34m(self, img)\u001b[0m\n\u001b[0;32m 462\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m--> 463\u001b[0m ret \u001b[39m=\u001b[39m rescale_array(img_t, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mminv, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmaxv, dtype\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdtype)\n\u001b[0;32m 464\u001b[0m \u001b[39melse\u001b[39;00m:\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\utils.py:186\u001b[0m, in \u001b[0;36mrescale_array\u001b[1;34m(arr, minv, maxv, dtype)\u001b[0m\n\u001b[0;32m 185\u001b[0m \u001b[39mif\u001b[39;00m dtype \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m--> 186\u001b[0m arr, \u001b[39m*\u001b[39m_ \u001b[39m=\u001b[39m convert_data_type(arr, dtype\u001b[39m=\u001b[39;49mdtype)\n\u001b[0;32m 187\u001b[0m mina \u001b[39m=\u001b[39m arr\u001b[39m.\u001b[39mmin()\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\utils\\type_conversion.py:325\u001b[0m, in \u001b[0;36mconvert_data_type\u001b[1;34m(data, output_type, device, dtype, wrap_sequence, safe)\u001b[0m\n\u001b[0;32m 324\u001b[0m \u001b[39mreturn\u001b[39;00m data_, orig_type, orig_device\n\u001b[1;32m--> 325\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mUnsupported output type: \u001b[39m\u001b[39m{\u001b[39;00moutput_type\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m)\n",
"\u001b[1;31mValueError\u001b[0m: Unsupported output type: <class 'str'>",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:103\u001b[0m, in \u001b[0;36mapply_transform\u001b[1;34m(transform, data, map_items, unpack_items, log_stats)\u001b[0m\n\u001b[0;32m 102\u001b[0m \u001b[39mreturn\u001b[39;00m [_apply_transform(transform, item, unpack_items) \u001b[39mfor\u001b[39;00m item \u001b[39min\u001b[39;00m data]\n\u001b[1;32m--> 103\u001b[0m \u001b[39mreturn\u001b[39;00m _apply_transform(transform, data, unpack_items)\n\u001b[0;32m 104\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m \u001b[39mas\u001b[39;00m e:\n\u001b[0;32m 105\u001b[0m \u001b[39m# if in debug mode, don't swallow exception so that the breakpoint\u001b[39;00m\n\u001b[0;32m 106\u001b[0m \u001b[39m# appears where the exception was raised.\u001b[39;00m\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:67\u001b[0m, in \u001b[0;36m_apply_transform\u001b[1;34m(transform, parameters, unpack_parameters)\u001b[0m\n\u001b[0;32m 65\u001b[0m \u001b[39mreturn\u001b[39;00m transform(\u001b[39m*\u001b[39mparameters)\n\u001b[1;32m---> 67\u001b[0m \u001b[39mreturn\u001b[39;00m transform(parameters)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\compose.py:451\u001b[0m, in \u001b[0;36mCompose.__call__\u001b[1;34m(self, input_, start, end, threading)\u001b[0m\n\u001b[0;32m 450\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__call__\u001b[39m(\u001b[39mself\u001b[39m, input_, start\u001b[39m=\u001b[39m\u001b[39m0\u001b[39m, end\u001b[39m=\u001b[39m\u001b[39mNone\u001b[39;00m, threading\u001b[39m=\u001b[39m\u001b[39mFalse\u001b[39;00m):\n\u001b[1;32m--> 451\u001b[0m \u001b[39mreturn\u001b[39;00m execute_compose(\n\u001b[0;32m 452\u001b[0m input_,\n\u001b[0;32m 453\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtransforms,\n\u001b[0;32m 454\u001b[0m start\u001b[39m=\u001b[39;49mstart,\n\u001b[0;32m 455\u001b[0m end\u001b[39m=\u001b[39;49mend,\n\u001b[0;32m 456\u001b[0m map_items\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmap_items,\n\u001b[0;32m 457\u001b[0m unpack_items\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49munpack_items,\n\u001b[0;32m 458\u001b[0m lazy_evaluation\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlazy_evaluation, \u001b[39m# type: ignore\u001b[39;49;00m\n\u001b[0;32m 459\u001b[0m overrides\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moverrides,\n\u001b[0;32m 460\u001b[0m override_keys\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49moverride_keys,\n\u001b[0;32m 461\u001b[0m threading\u001b[39m=\u001b[39;49mthreading,\n\u001b[0;32m 462\u001b[0m log_stats\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlog_stats,\n\u001b[0;32m 463\u001b[0m verbose\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mverbose,\n\u001b[0;32m 464\u001b[0m )\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\compose.py:199\u001b[0m, in \u001b[0;36mexecute_compose\u001b[1;34m(data, transforms, map_items, unpack_items, start, end, lazy_evaluation, overrides, override_keys, threading, log_stats, verbose)\u001b[0m\n\u001b[0;32m 191\u001b[0m data \u001b[39m=\u001b[39m evaluate_with_overrides(\n\u001b[0;32m 192\u001b[0m data,\n\u001b[0;32m 193\u001b[0m _transform,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 197\u001b[0m verbose\u001b[39m=\u001b[39mverbose,\n\u001b[0;32m 198\u001b[0m )\n\u001b[1;32m--> 199\u001b[0m data \u001b[39m=\u001b[39m apply_transform(_transform, data, map_items, unpack_items, log_stats)\n\u001b[0;32m 200\u001b[0m data \u001b[39m=\u001b[39m evaluate_with_overrides(\n\u001b[0;32m 201\u001b[0m data, \u001b[39mNone\u001b[39;00m, lazy_evaluation\u001b[39m=\u001b[39mlazy_evaluation, overrides\u001b[39m=\u001b[39moverrides, override_keys\u001b[39m=\u001b[39moverride_keys, verbose\u001b[39m=\u001b[39mverbose\n\u001b[0;32m 202\u001b[0m )\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:130\u001b[0m, in \u001b[0;36mapply_transform\u001b[1;34m(transform, data, map_items, unpack_items, log_stats)\u001b[0m\n\u001b[0;32m 129\u001b[0m _log_stats(data\u001b[39m=\u001b[39mdata)\n\u001b[1;32m--> 130\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mapplying transform \u001b[39m\u001b[39m{\u001b[39;00mtransform\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m) \u001b[39mfrom\u001b[39;00m \u001b[39me\u001b[39;00m\n",
"\u001b[1;31mRuntimeError\u001b[0m: applying transform <monai.transforms.intensity.dictionary.ScaleIntensityd object at 0x000001F80D2FC8E0>",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[5], line 35\u001b[0m\n\u001b[0;32m 26\u001b[0m train_ds, val_ds \u001b[39m=\u001b[39m load_volumes(train_transforms, \n\u001b[0;32m 27\u001b[0m train_ds, \n\u001b[0;32m 28\u001b[0m val_ds, \n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 31\u001b[0m ifsave\u001b[39m=\u001b[39m\u001b[39mFalse\u001b[39;00m,\n\u001b[0;32m 32\u001b[0m ifcheck\u001b[39m=\u001b[39mifcheck_volume)\n\u001b[0;32m 34\u001b[0m loader \u001b[39m=\u001b[39m DataLoader(train_ds, batch_size\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m)\n\u001b[1;32m---> 35\u001b[0m \u001b[39mfor\u001b[39;00m idx, checkdata \u001b[39min\u001b[39;00m \u001b[39menumerate\u001b[39m(loader):\n\u001b[0;32m 36\u001b[0m \u001b[39mprint\u001b[39m(checkdata[\u001b[39m'\u001b[39m\u001b[39mlabel\u001b[39m\u001b[39m'\u001b[39m]\u001b[39m.\u001b[39mshape)\n\u001b[0;32m 37\u001b[0m \u001b[39mbreak\u001b[39;00m\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:628\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 625\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_sampler_iter \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m 626\u001b[0m \u001b[39m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[39;00m\n\u001b[0;32m 627\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_reset() \u001b[39m# type: ignore[call-arg]\u001b[39;00m\n\u001b[1;32m--> 628\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_next_data()\n\u001b[0;32m 629\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_yielded \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m \u001b[39m1\u001b[39m\n\u001b[0;32m 630\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_dataset_kind \u001b[39m==\u001b[39m _DatasetKind\u001b[39m.\u001b[39mIterable \u001b[39mand\u001b[39;00m \\\n\u001b[0;32m 631\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_IterableDataset_len_called \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m \\\n\u001b[0;32m 632\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_yielded \u001b[39m>\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_IterableDataset_len_called:\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\torch\\utils\\data\\dataloader.py:671\u001b[0m, in \u001b[0;36m_SingleProcessDataLoaderIter._next_data\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 669\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_next_data\u001b[39m(\u001b[39mself\u001b[39m):\n\u001b[0;32m 670\u001b[0m index \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_next_index() \u001b[39m# may raise StopIteration\u001b[39;00m\n\u001b[1;32m--> 671\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_dataset_fetcher\u001b[39m.\u001b[39;49mfetch(index) \u001b[39m# may raise StopIteration\u001b[39;00m\n\u001b[0;32m 672\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_pin_memory:\n\u001b[0;32m 673\u001b[0m data \u001b[39m=\u001b[39m _utils\u001b[39m.\u001b[39mpin_memory\u001b[39m.\u001b[39mpin_memory(data, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_pin_memory_device)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:58\u001b[0m, in \u001b[0;36m_MapDatasetFetcher.fetch\u001b[1;34m(self, possibly_batched_index)\u001b[0m\n\u001b[0;32m 56\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset\u001b[39m.\u001b[39m__getitems__(possibly_batched_index)\n\u001b[0;32m 57\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m---> 58\u001b[0m data \u001b[39m=\u001b[39m [\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset[idx] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m possibly_batched_index]\n\u001b[0;32m 59\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 60\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset[possibly_batched_index]\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:58\u001b[0m, in \u001b[0;36m<listcomp>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 56\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset\u001b[39m.\u001b[39m__getitems__(possibly_batched_index)\n\u001b[0;32m 57\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m---> 58\u001b[0m data \u001b[39m=\u001b[39m [\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdataset[idx] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m possibly_batched_index]\n\u001b[0;32m 59\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 60\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset[possibly_batched_index]\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\data\\dataset.py:109\u001b[0m, in \u001b[0;36mDataset.__getitem__\u001b[1;34m(self, index)\u001b[0m\n\u001b[0;32m 106\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39misinstance\u001b[39m(index, collections\u001b[39m.\u001b[39mabc\u001b[39m.\u001b[39mSequence):\n\u001b[0;32m 107\u001b[0m \u001b[39m# dataset[[1, 3, 4]]\u001b[39;00m\n\u001b[0;32m 108\u001b[0m \u001b[39mreturn\u001b[39;00m Subset(dataset\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m, indices\u001b[39m=\u001b[39mindex)\n\u001b[1;32m--> 109\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_transform(index)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\data\\dataset.py:95\u001b[0m, in \u001b[0;36mDataset._transform\u001b[1;34m(self, index)\u001b[0m\n\u001b[0;32m 91\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m 92\u001b[0m \u001b[39mFetch single data item from `self.data`.\u001b[39;00m\n\u001b[0;32m 93\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m 94\u001b[0m data_i \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdata[index]\n\u001b[1;32m---> 95\u001b[0m \u001b[39mreturn\u001b[39;00m apply_transform(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtransform, data_i) \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtransform \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39melse\u001b[39;00m data_i\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:130\u001b[0m, in \u001b[0;36mapply_transform\u001b[1;34m(transform, data, map_items, unpack_items, log_stats)\u001b[0m\n\u001b[0;32m 128\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 129\u001b[0m _log_stats(data\u001b[39m=\u001b[39mdata)\n\u001b[1;32m--> 130\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mapplying transform \u001b[39m\u001b[39m{\u001b[39;00mtransform\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m) \u001b[39mfrom\u001b[39;00m \u001b[39me\u001b[39;00m\n",
"\u001b[1;31mRuntimeError\u001b[0m: applying transform <monai.transforms.compose.Compose object at 0x000001F80C2EC580>"
]
}
],
"source": [
"from basics import get_transforms, get_file_list, load_volumes, crop_volumes\t\n",
"from torch.utils.data import DataLoader\n",
"import torch\n",
"dataset_path=\"E:\\Projects\\yang_proj\\Task1\\pelvis\"\n",
"normalize='minmax'\n",
"pad='minimum'\n",
"train_number=1\n",
"val_number=1\n",
"train_batch_size=8\n",
"val_batch_size=1\n",
"saved_name_train='./train_ds_2d.csv'\n",
"saved_name_val='./val_ds_2d.csv'\n",
"resized_size=(512,512,None)\n",
"div_size=(16,16,None)\n",
"center_crop=20\n",
"ifcheck_volume=False\n",
"ifcheck_sclices=False\n",
"\n",
"# volume-level transforms for both image and label\n",
"train_transforms = get_transforms(normalize,pad,resized_size,div_size,center_crop=center_crop)\n",
"train_ds, val_ds = get_file_list(dataset_path, \n",
" train_number, \n",
" val_number)\n",
"#train_ds, val_ds = crop_volumes(train_ds, val_ds,center_crop)\n",
"\n",
"train_ds, val_ds = load_volumes(train_transforms, \n",
" train_ds, \n",
" val_ds, \n",
" saved_name_train, \n",
" saved_name_val,\n",
" ifsave=False,\n",
" ifcheck=ifcheck_volume)\n",
"\n",
"loader = DataLoader(train_ds, batch_size=1)\n",
"for idx, checkdata in enumerate(loader):\n",
" print(checkdata['label'].shape)\n",
" break\n"
]
},
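{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "The `ValueError: Unsupported output type: <class 'str'>` in the traceback above means `ScaleIntensityd` received a Python string where it expected an array, which typically happens when a dict key still holds a file path because no loading transform ran on that key first. `get_transforms` is project code, so the cell below is only a minimal sketch of the transform ordering that avoids the error, with hypothetical file names."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Minimal sketch (assumption: not the project's actual get_transforms).\n",
  "# Load path strings into arrays *before* any intensity transform touches them;\n",
  "# otherwise ScaleIntensityd sees a str and convert_data_type raises ValueError.\n",
  "from monai.transforms import Compose, LoadImaged, EnsureChannelFirstd, ScaleIntensityd\n",
  "from monai.data import Dataset\n",
  "from torch.utils.data import DataLoader\n",
  "\n",
  "keys = [\"image\", \"label\"]\n",
  "check_transforms = Compose([\n",
  "    LoadImaged(keys=keys),           # path str -> array, with metadata\n",
  "    EnsureChannelFirstd(keys=keys),\n",
  "    ScaleIntensityd(keys=keys, minv=0.0, maxv=1.0),  # minmax normalization\n",
  "])\n",
  "\n",
  "# hypothetical file list; the real one comes from get_file_list(...)\n",
  "files = [{\"image\": \"img_0001.nii.gz\", \"label\": \"lbl_0001.nii.gz\"}]\n",
  "check_loader = DataLoader(Dataset(data=files, transform=check_transforms), batch_size=1)"
 ]
},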
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{class: 'CenterSpatialCrop', id: 1809600453888, tracing: True, lazy_evaluation: False, do_transforms: True, orig_size: (565, 338, 146), extra_info: {'cropped': [0, 0, 0, 0, 63, 63]}}, {class: 'ResizeWithPadOrCrop', id: 1809599226160, tracing: True, lazy_evaluation: False, do_transforms: True, orig_size: (565, 338, 20), extra_info: {'pad_info': {class: 'SpatialPad', id: 1809817783648, tracing: True, lazy_evaluation: False, do_transforms: True, orig_size: (512, 338, 20), extra_info: {'padded': ((0, 0), (0, 0), (87, 87), (0, 0)), 'mode': 'minimum'}}, 'crop_info': {class: 'CenterSpatialCrop', id: 1809600451728, tracing: True, lazy_evaluation: False, do_transforms: True, orig_size: (565, 338, 20), extra_info: {'cropped': [26, 27, 0, 0, 0, 0]}}}}, {class: 'Rotate90', id: 1809600452736, tracing: True, lazy_evaluation: False, do_transforms: True, orig_size: (512, 512, 20), extra_info: {'axes': [0, 1], 'k': 3}}]\n"
]
},
{
"ename": "RuntimeError",
"evalue": "applying transform <bound method Padd.inverse of <monai.transforms.croppad.dictionary.DivisiblePadd object at 0x000001A554979430>>",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:103\u001b[0m, in \u001b[0;36mapply_transform\u001b[1;34m(transform, data, map_items, unpack_items, log_stats)\u001b[0m\n\u001b[0;32m 102\u001b[0m \u001b[39mreturn\u001b[39;00m [_apply_transform(transform, item, unpack_items) \u001b[39mfor\u001b[39;00m item \u001b[39min\u001b[39;00m data]\n\u001b[1;32m--> 103\u001b[0m \u001b[39mreturn\u001b[39;00m _apply_transform(transform, data, unpack_items)\n\u001b[0;32m 104\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m \u001b[39mas\u001b[39;00m e:\n\u001b[0;32m 105\u001b[0m \u001b[39m# if in debug mode, don't swallow exception so that the breakpoint\u001b[39;00m\n\u001b[0;32m 106\u001b[0m \u001b[39m# appears where the exception was raised.\u001b[39;00m\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:67\u001b[0m, in \u001b[0;36m_apply_transform\u001b[1;34m(transform, parameters, unpack_parameters)\u001b[0m\n\u001b[0;32m 65\u001b[0m \u001b[39mreturn\u001b[39;00m transform(\u001b[39m*\u001b[39mparameters)\n\u001b[1;32m---> 67\u001b[0m \u001b[39mreturn\u001b[39;00m transform(parameters)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\croppad\\dictionary.py:162\u001b[0m, in \u001b[0;36mPadd.inverse\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 161\u001b[0m \u001b[39mfor\u001b[39;00m key \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mkey_iterator(d):\n\u001b[1;32m--> 162\u001b[0m d[key] \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpadder\u001b[39m.\u001b[39;49minverse(d[key])\n\u001b[0;32m 163\u001b[0m \u001b[39mreturn\u001b[39;00m d\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\croppad\\array.py:156\u001b[0m, in \u001b[0;36mPad.inverse\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 155\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minverse\u001b[39m(\u001b[39mself\u001b[39m, data: MetaTensor) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m MetaTensor:\n\u001b[1;32m--> 156\u001b[0m transform \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpop_transform(data)\n\u001b[0;32m 157\u001b[0m padded \u001b[39m=\u001b[39m transform[TraceKeys\u001b[39m.\u001b[39mEXTRA_INFO][\u001b[39m\"\u001b[39m\u001b[39mpadded\u001b[39m\u001b[39m\"\u001b[39m]\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\inverse.py:301\u001b[0m, in \u001b[0;36mTraceableTransform.pop_transform\u001b[1;34m(self, data, key, check)\u001b[0m\n\u001b[0;32m 287\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m 288\u001b[0m \u001b[39mReturn and pop the most recent transform.\u001b[39;00m\n\u001b[0;32m 289\u001b[0m \n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 299\u001b[0m \u001b[39m - RuntimeError: data is neither `MetaTensor` nor dictionary\u001b[39;00m\n\u001b[0;32m 300\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m--> 301\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mget_most_recent_transform(data, key, check, pop\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\inverse.py:283\u001b[0m, in \u001b[0;36mTraceableTransform.get_most_recent_transform\u001b[1;34m(self, data, key, check, pop)\u001b[0m\n\u001b[0;32m 282\u001b[0m \u001b[39mif\u001b[39;00m check:\n\u001b[1;32m--> 283\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mcheck_transforms_match(all_transforms[\u001b[39m-\u001b[39;49m\u001b[39m1\u001b[39;49m])\n\u001b[0;32m 284\u001b[0m \u001b[39mreturn\u001b[39;00m all_transforms\u001b[39m.\u001b[39mpop() \u001b[39mif\u001b[39;00m pop \u001b[39melse\u001b[39;00m all_transforms[\u001b[39m-\u001b[39m\u001b[39m1\u001b[39m]\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\inverse.py:250\u001b[0m, in \u001b[0;36mTraceableTransform.check_transforms_match\u001b[1;34m(self, transform)\u001b[0m\n\u001b[0;32m 249\u001b[0m \u001b[39mreturn\u001b[39;00m\n\u001b[1;32m--> 250\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\n\u001b[0;32m 251\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mError \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m getting the most recently \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m 252\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mapplied invertible transform \u001b[39m\u001b[39m{\u001b[39;00mxform_name\u001b[39m}\u001b[39;00m\u001b[39m \u001b[39m\u001b[39m{\u001b[39;00mxform_id\u001b[39m}\u001b[39;00m\u001b[39m != \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mid\u001b[39m(\u001b[39mself\u001b[39m)\u001b[39m}\u001b[39;00m\u001b[39m.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[0;32m 253\u001b[0m )\n",
"\u001b[1;31mRuntimeError\u001b[0m: Error DivisiblePad getting the most recently applied invertible transform Rotate90 1809600452736 != 1809600452064.",
"\nThe above exception was the direct cause of the following exception:\n",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[10], line 6\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[39mdict\u001b[39m \u001b[39m=\u001b[39m {\u001b[39m\"\u001b[39m\u001b[39mimage\u001b[39m\u001b[39m\"\u001b[39m: labeldata} \u001b[39m# always set val_batch_size=1\u001b[39;00m\n\u001b[0;32m 5\u001b[0m \u001b[39mwith\u001b[39;00m allow_missing_keys_mode(train_transforms):\n\u001b[1;32m----> 6\u001b[0m reversed_data\u001b[39m=\u001b[39mtrain_transforms\u001b[39m.\u001b[39;49minverse(\u001b[39mdict\u001b[39;49m)\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\compose.py:473\u001b[0m, in \u001b[0;36mCompose.inverse\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 471\u001b[0m \u001b[39m# loop backwards over transforms\u001b[39;00m\n\u001b[0;32m 472\u001b[0m \u001b[39mfor\u001b[39;00m t \u001b[39min\u001b[39;00m \u001b[39mreversed\u001b[39m(invertible_transforms):\n\u001b[1;32m--> 473\u001b[0m data \u001b[39m=\u001b[39m apply_transform(t\u001b[39m.\u001b[39;49minverse, data, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmap_items, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49munpack_items, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlog_stats)\n\u001b[0;32m 474\u001b[0m \u001b[39mreturn\u001b[39;00m data\n",
"File \u001b[1;32me:\\Projects\\yang_proj\\torch\\venv\\lib\\site-packages\\monai\\transforms\\transform.py:130\u001b[0m, in \u001b[0;36mapply_transform\u001b[1;34m(transform, data, map_items, unpack_items, log_stats)\u001b[0m\n\u001b[0;32m 128\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 129\u001b[0m _log_stats(data\u001b[39m=\u001b[39mdata)\n\u001b[1;32m--> 130\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mRuntimeError\u001b[39;00m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mapplying transform \u001b[39m\u001b[39m{\u001b[39;00mtransform\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m) \u001b[39mfrom\u001b[39;00m \u001b[39me\u001b[39;00m\n",
"\u001b[1;31mRuntimeError\u001b[0m: applying transform <bound method Padd.inverse of <monai.transforms.croppad.dictionary.DivisiblePadd object at 0x000001A554979430>>"
]
}
],
"source": [
"from monai.transforms.utils import allow_missing_keys_mode\n",
"labeldata=checkdata['label']\n",
"#print(labeldata.applied_operations)\n",
"dict = {\"image\": labeldata} # always set val_batch_size=1\n",
"with allow_missing_keys_mode(train_transforms):\n",
" reversed_data=train_transforms.inverse(dict)"
]
},
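{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "The inverse fails because the trace printed above ends with `Rotate90`, while the `Compose` expects its own `DivisiblePadd` to be the most recently applied invertible transform: the tensor being inverted does not carry the trace this exact pipeline instance would have left, and collation by the `DataLoader` can further mix per-sample traces. A hedged sketch of the usual remedy, assuming `checkdata` came from a loader over data transformed by this same `train_transforms` instance: decollate the batch and invert one sample at a time."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Sketch, not a guaranteed fix: invert per sample, never on the collated batch,\n",
  "# so each item keeps its own applied_operations trace.\n",
  "from monai.data import decollate_batch\n",
  "from monai.transforms.utils import allow_missing_keys_mode\n",
  "\n",
  "items = decollate_batch(checkdata)  # list of per-sample dicts\n",
  "with allow_missing_keys_mode(train_transforms):\n",
  "    inverted = [train_transforms.inverse({\"image\": item[\"label\"]}) for item in items]"
 ]
},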
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from slice_loader import myslicesloader\n",
"dataset_path = r\"F:\\yang_Projects\\Datasets\\Task1\\pelvis\"\n",
"train_volume_ds,_,train_loader,_,_ = myslicesloader(dataset_path,\n",
" normalize='none',\n",
" train_number=2,\n",
" val_number=1,\n",
" train_batch_size=4,\n",
" val_batch_size=1,\n",
" saved_name_train='./train_ds_2d.csv',\n",
" saved_name_val='./val_ds_2d.csv',\n",
" resized_size=(512, 512, None),\n",
" div_size=(16,16,None),\n",
" ifcheck_volume=False,\n",
" ifcheck_sclices=False,)\n",
"# test data\n",
"#iter_train = iter(train_loader)\n",
"#batch = next(iter_train)\n",
"#print(batch['image'].shape)\n",
"\n",
"# slice data\n",
"slice_number=sum(train_volume_ds[i]['image'].shape[-1] for i in range(len(train_volume_ds)))\n",
"print(slice_number)\n",
"\n",
"# test data loop\n",
"\n",
"#for i, batch in enumerate(train_loader):\n",
"# print(i, batch['image'].shape)"
]
},
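{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Indexing `train_volume_ds[i]` in the cell above runs the full transform chain once per volume just to read one shape. If only the slice count is needed, reading the NIfTI headers directly is much cheaper; a sketch with nibabel follows, using hypothetical paths since the real file list lives inside `myslicesloader`."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Sketch: nibabel's load() returns a proxy image, so .shape reads only the\n",
  "# header and no voxel data is pulled into memory.\n",
  "import nibabel as nib\n",
  "\n",
  "def count_slices(image_paths):\n",
  "    \"\"\"Sum the last-axis size across volumes without loading voxel data.\"\"\"\n",
  "    return sum(nib.load(p).shape[-1] for p in image_paths)\n",
  "\n",
  "# usage with hypothetical paths:\n",
  "# print(count_slices([\"img_0001.nii.gz\", \"img_0002.nii.gz\"]))"
 ]
}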
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}