{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "a7dd2ed2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Defaulting to user installation because normal site-packages is not writeable\n",
"Collecting timm\n",
" Downloading timm-1.0.15-py3-none-any.whl.metadata (52 kB)\n",
"Requirement already satisfied: torch in /home/ubuntu/.local/lib/python3.12/site-packages (from timm) (2.6.0)\n",
"Requirement already satisfied: torchvision in /home/ubuntu/.local/lib/python3.12/site-packages (from timm) (0.21.0)\n",
"Requirement already satisfied: pyyaml in /opt/anaconda3/lib/python3.12/site-packages (from timm) (6.0.1)\n",
"Requirement already satisfied: huggingface_hub in /home/ubuntu/.local/lib/python3.12/site-packages (from timm) (0.29.3)\n",
"Requirement already satisfied: safetensors in /home/ubuntu/.local/lib/python3.12/site-packages (from timm) (0.5.3)\n",
"Requirement already satisfied: filelock in /opt/anaconda3/lib/python3.12/site-packages (from huggingface_hub->timm) (3.13.1)\n",
"Requirement already satisfied: fsspec>=2023.5.0 in /opt/anaconda3/lib/python3.12/site-packages (from huggingface_hub->timm) (2024.6.1)\n",
"Requirement already satisfied: packaging>=20.9 in /opt/anaconda3/lib/python3.12/site-packages (from huggingface_hub->timm) (24.1)\n",
"Requirement already satisfied: requests in /opt/anaconda3/lib/python3.12/site-packages (from huggingface_hub->timm) (2.32.3)\n",
"Requirement already satisfied: tqdm>=4.42.1 in /opt/anaconda3/lib/python3.12/site-packages (from huggingface_hub->timm) (4.66.5)\n",
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/anaconda3/lib/python3.12/site-packages (from huggingface_hub->timm) (4.11.0)\n",
"Requirement already satisfied: networkx in /opt/anaconda3/lib/python3.12/site-packages (from torch->timm) (3.3)\n",
"Requirement already satisfied: jinja2 in /opt/anaconda3/lib/python3.12/site-packages (from torch->timm) (3.1.4)\n",
"Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.127 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.4.127)\n",
"Requirement already satisfied: nvidia-cuda-runtime-cu12==12.4.127 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.4.127)\n",
"Requirement already satisfied: nvidia-cuda-cupti-cu12==12.4.127 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.4.127)\n",
"Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (9.1.0.70)\n",
"Requirement already satisfied: nvidia-cublas-cu12==12.4.5.8 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.4.5.8)\n",
"Requirement already satisfied: nvidia-cufft-cu12==11.2.1.3 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (11.2.1.3)\n",
"Requirement already satisfied: nvidia-curand-cu12==10.3.5.147 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (10.3.5.147)\n",
"Requirement already satisfied: nvidia-cusolver-cu12==11.6.1.9 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (11.6.1.9)\n",
"Requirement already satisfied: nvidia-cusparse-cu12==12.3.1.170 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.3.1.170)\n",
"Requirement already satisfied: nvidia-cusparselt-cu12==0.6.2 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (0.6.2)\n",
"Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (2.21.5)\n",
"Requirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.4.127)\n",
"Requirement already satisfied: nvidia-nvjitlink-cu12==12.4.127 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (12.4.127)\n",
"Requirement already satisfied: triton==3.2.0 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (3.2.0)\n",
"Requirement already satisfied: setuptools in /opt/anaconda3/lib/python3.12/site-packages (from torch->timm) (75.1.0)\n",
"Requirement already satisfied: sympy==1.13.1 in /home/ubuntu/.local/lib/python3.12/site-packages (from torch->timm) (1.13.1)\n",
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in /opt/anaconda3/lib/python3.12/site-packages (from sympy==1.13.1->torch->timm) (1.3.0)\n",
"Requirement already satisfied: numpy in /opt/anaconda3/lib/python3.12/site-packages (from torchvision->timm) (1.26.4)\n",
"Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /opt/anaconda3/lib/python3.12/site-packages (from torchvision->timm) (10.4.0)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /opt/anaconda3/lib/python3.12/site-packages (from jinja2->torch->timm) (2.1.3)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /opt/anaconda3/lib/python3.12/site-packages (from requests->huggingface_hub->timm) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /opt/anaconda3/lib/python3.12/site-packages (from requests->huggingface_hub->timm) (3.7)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/anaconda3/lib/python3.12/site-packages (from requests->huggingface_hub->timm) (2.2.3)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /opt/anaconda3/lib/python3.12/site-packages (from requests->huggingface_hub->timm) (2024.8.30)\n",
"Downloading timm-1.0.15-py3-none-any.whl (2.4 MB)\n",
"\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m13.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n",
"\u001b[?25hInstalling collected packages: timm\n",
"Successfully installed timm-1.0.15\n"
]
}
],
"source": [
"# Use %pip (not !pip) so the install targets the running kernel's environment,\n",
"# and pin the version for reproducibility (1.0.15 is what this notebook ran with).\n",
"%pip install -q timm==1.0.15"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ff0cd74c",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e878415c59ba4f97a1fac91e71010469",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"config.json: 0%| | 0.00/742 [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "356e15c8e1434fbe9d52f6ec94d6e875",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"model.safetensors: 0%| | 0.00/2.53G [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"ename": "RuntimeError",
"evalue": "Error(s) in loading state_dict for VisionTransformer:\n\tsize mismatch for blocks.0.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.1.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.2.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.3.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.4.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.5.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.6.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.7.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.8.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.9.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.10.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.11.mlp.fc2.weight: 
copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.12.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.13.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.14.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.15.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.16.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.17.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.18.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.19.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.20.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.21.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.22.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is 
torch.Size([1280, 6832]).\n\tsize mismatch for blocks.23.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.24.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.25.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.26.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.27.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.28.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.29.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.30.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.31.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[2], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtimm\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m model \u001b[38;5;241m=\u001b[39m timm\u001b[38;5;241m.\u001b[39mcreate_model(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhf_hub:paige-ai/Virchow2\u001b[39m\u001b[38;5;124m\"\u001b[39m, pretrained\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/timm/models/_factory.py:126\u001b[0m, in \u001b[0;36mcreate_model\u001b[0;34m(model_name, pretrained, pretrained_cfg, pretrained_cfg_overlay, checkpoint_path, cache_dir, scriptable, exportable, no_jit, **kwargs)\u001b[0m\n\u001b[1;32m 124\u001b[0m create_fn \u001b[38;5;241m=\u001b[39m model_entrypoint(model_name)\n\u001b[1;32m 125\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m set_layer_config(scriptable\u001b[38;5;241m=\u001b[39mscriptable, exportable\u001b[38;5;241m=\u001b[39mexportable, no_jit\u001b[38;5;241m=\u001b[39mno_jit):\n\u001b[0;32m--> 126\u001b[0m model \u001b[38;5;241m=\u001b[39m create_fn(\n\u001b[1;32m 127\u001b[0m pretrained\u001b[38;5;241m=\u001b[39mpretrained,\n\u001b[1;32m 128\u001b[0m pretrained_cfg\u001b[38;5;241m=\u001b[39mpretrained_cfg,\n\u001b[1;32m 129\u001b[0m pretrained_cfg_overlay\u001b[38;5;241m=\u001b[39mpretrained_cfg_overlay,\n\u001b[1;32m 130\u001b[0m cache_dir\u001b[38;5;241m=\u001b[39mcache_dir,\n\u001b[1;32m 131\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 132\u001b[0m )\n\u001b[1;32m 134\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m checkpoint_path:\n\u001b[1;32m 135\u001b[0m load_checkpoint(model, checkpoint_path)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/timm/models/vision_transformer.py:2583\u001b[0m, in \u001b[0;36mvit_huge_patch14_224\u001b[0;34m(pretrained, **kwargs)\u001b[0m\n\u001b[1;32m 2580\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).\u001b[39;00m\n\u001b[1;32m 2581\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 2582\u001b[0m model_args \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mdict\u001b[39m(patch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m14\u001b[39m, embed_dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1280\u001b[39m, depth\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m32\u001b[39m, num_heads\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m16\u001b[39m)\n\u001b[0;32m-> 2583\u001b[0m model \u001b[38;5;241m=\u001b[39m _create_vision_transformer(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mvit_huge_patch14_224\u001b[39m\u001b[38;5;124m'\u001b[39m, pretrained\u001b[38;5;241m=\u001b[39mpretrained, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39m\u001b[38;5;28mdict\u001b[39m(model_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs))\n\u001b[1;32m 2584\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/timm/models/vision_transformer.py:2406\u001b[0m, in \u001b[0;36m_create_vision_transformer\u001b[0;34m(variant, pretrained, **kwargs)\u001b[0m\n\u001b[1;32m 2403\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124msiglip\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m variant \u001b[38;5;129;01mand\u001b[39;00m kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mglobal_pool\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmap\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 2404\u001b[0m strict \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[0;32m-> 2406\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m build_model_with_cfg(\n\u001b[1;32m 2407\u001b[0m VisionTransformer,\n\u001b[1;32m 2408\u001b[0m variant,\n\u001b[1;32m 2409\u001b[0m pretrained,\n\u001b[1;32m 2410\u001b[0m pretrained_filter_fn\u001b[38;5;241m=\u001b[39m_filter_fn,\n\u001b[1;32m 2411\u001b[0m pretrained_strict\u001b[38;5;241m=\u001b[39mstrict,\n\u001b[1;32m 2412\u001b[0m feature_cfg\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mdict\u001b[39m(out_indices\u001b[38;5;241m=\u001b[39mout_indices, feature_cls\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mgetter\u001b[39m\u001b[38;5;124m'\u001b[39m),\n\u001b[1;32m 2413\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 2414\u001b[0m )\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/timm/models/_builder.py:436\u001b[0m, in \u001b[0;36mbuild_model_with_cfg\u001b[0;34m(model_cls, variant, pretrained, pretrained_cfg, pretrained_cfg_overlay, model_cfg, feature_cfg, pretrained_strict, pretrained_filter_fn, cache_dir, kwargs_filter, **kwargs)\u001b[0m\n\u001b[1;32m 434\u001b[0m num_classes_pretrained \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m features \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(model, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mnum_classes\u001b[39m\u001b[38;5;124m'\u001b[39m, kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mnum_classes\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;241m1000\u001b[39m))\n\u001b[1;32m 435\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m pretrained:\n\u001b[0;32m--> 436\u001b[0m load_pretrained(\n\u001b[1;32m 437\u001b[0m model,\n\u001b[1;32m 438\u001b[0m pretrained_cfg\u001b[38;5;241m=\u001b[39mpretrained_cfg,\n\u001b[1;32m 439\u001b[0m num_classes\u001b[38;5;241m=\u001b[39mnum_classes_pretrained,\n\u001b[1;32m 440\u001b[0m in_chans\u001b[38;5;241m=\u001b[39mkwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124min_chans\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;241m3\u001b[39m),\n\u001b[1;32m 441\u001b[0m filter_fn\u001b[38;5;241m=\u001b[39mpretrained_filter_fn,\n\u001b[1;32m 442\u001b[0m strict\u001b[38;5;241m=\u001b[39mpretrained_strict,\n\u001b[1;32m 443\u001b[0m cache_dir\u001b[38;5;241m=\u001b[39mcache_dir,\n\u001b[1;32m 444\u001b[0m )\n\u001b[1;32m 446\u001b[0m \u001b[38;5;66;03m# Wrap the model in a feature extraction module if enabled\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m features:\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/timm/models/_builder.py:260\u001b[0m, in \u001b[0;36mload_pretrained\u001b[0;34m(model, pretrained_cfg, num_classes, in_chans, filter_fn, strict, cache_dir)\u001b[0m\n\u001b[1;32m 257\u001b[0m classifier_bias \u001b[38;5;241m=\u001b[39m state_dict[classifier_name \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.bias\u001b[39m\u001b[38;5;124m'\u001b[39m]\n\u001b[1;32m 258\u001b[0m state_dict[classifier_name \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.bias\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m classifier_bias[label_offset:]\n\u001b[0;32m--> 260\u001b[0m load_result \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39mload_state_dict(state_dict, strict\u001b[38;5;241m=\u001b[39mstrict)\n\u001b[1;32m 261\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m load_result\u001b[38;5;241m.\u001b[39mmissing_keys:\n\u001b[1;32m 262\u001b[0m _logger\u001b[38;5;241m.\u001b[39minfo(\n\u001b[1;32m 263\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mMissing keys (\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(load_result\u001b[38;5;241m.\u001b[39mmissing_keys)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m) discovered while loading pretrained weights.\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 264\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m This is expected if model is being adapted.\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/torch/nn/modules/module.py:2581\u001b[0m, in \u001b[0;36mModule.load_state_dict\u001b[0;34m(self, state_dict, strict, assign)\u001b[0m\n\u001b[1;32m 2573\u001b[0m error_msgs\u001b[38;5;241m.\u001b[39minsert(\n\u001b[1;32m 2574\u001b[0m \u001b[38;5;241m0\u001b[39m,\n\u001b[1;32m 2575\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMissing key(s) in state_dict: \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m. \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\n\u001b[1;32m 2576\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mk\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m missing_keys)\n\u001b[1;32m 2577\u001b[0m ),\n\u001b[1;32m 2578\u001b[0m )\n\u001b[1;32m 2580\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(error_msgs) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m-> 2581\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 2582\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError(s) in loading state_dict for \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\n\u001b[1;32m 2583\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, 
\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(error_msgs)\n\u001b[1;32m 2584\u001b[0m )\n\u001b[1;32m 2585\u001b[0m )\n\u001b[1;32m 2586\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _IncompatibleKeys(missing_keys, unexpected_keys)\n",
"\u001b[0;31mRuntimeError\u001b[0m: Error(s) in loading state_dict for VisionTransformer:\n\tsize mismatch for blocks.0.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.1.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.2.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.3.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.4.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.5.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.6.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.7.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.8.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.9.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.10.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for 
blocks.11.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.12.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.13.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.14.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.15.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.16.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.17.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.18.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.19.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.20.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.21.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.22.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in 
current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.23.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.24.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.25.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.26.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.27.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.28.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.29.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.30.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832]).\n\tsize mismatch for blocks.31.mlp.fc2.weight: copying a param with shape torch.Size([1280, 3416]) from checkpoint, the shape in current model is torch.Size([1280, 6832])."
]
}
],
"source": [
"import timm\n",
"import torch\n",
"\n",
"# Virchow2 is a ViT-H/14 trained with a packed SwiGLU MLP and SiLU activation.\n",
"# Without these overrides, timm builds the default GELU Mlp whose fc2 expects\n",
"# in_features=6832, while the checkpoint stores 3416 (SwiGLU halves the hidden\n",
"# width) -- exactly the per-block size-mismatch RuntimeError seen in the output.\n",
"# The overrides below are taken from the paige-ai/Virchow2 model card.\n",
"model = timm.create_model(\n",
"    \"hf_hub:paige-ai/Virchow2\",\n",
"    pretrained=True,\n",
"    mlp_layer=timm.layers.SwiGLUPacked,\n",
"    act_layer=torch.nn.SiLU,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2999e249",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2025-04-18 16:09:01.177109: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
"E0000 00:00:1744967341.201028 2970384 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"E0000 00:00:1744967341.209888 2970384 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"W0000 00:00:1744967341.235808 2970384 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"W0000 00:00:1744967341.235834 2970384 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"W0000 00:00:1744967341.235838 2970384 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"W0000 00:00:1744967341.235840 2970384 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n",
"2025-04-18 16:09:01.243585: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
},
{
"ename": "TypeError",
"evalue": "'NoneType' object cannot be interpreted as an integer",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[3], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtransformers\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AutoModel, AutoImageProcessor\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# Load the model directly from Hugging Face\u001b[39;00m\n\u001b[0;32m----> 4\u001b[0m model \u001b[38;5;241m=\u001b[39m AutoModel\u001b[38;5;241m.\u001b[39mfrom_pretrained(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpaige-ai/Virchow2\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 5\u001b[0m processor \u001b[38;5;241m=\u001b[39m AutoImageProcessor\u001b[38;5;241m.\u001b[39mfrom_pretrained(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpaige-ai/Virchow2\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/transformers/models/auto/auto_factory.py:531\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 528\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 529\u001b[0m _ \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 531\u001b[0m config, kwargs \u001b[38;5;241m=\u001b[39m AutoConfig\u001b[38;5;241m.\u001b[39mfrom_pretrained(\n\u001b[1;32m 532\u001b[0m pretrained_model_name_or_path,\n\u001b[1;32m 533\u001b[0m return_unused_kwargs\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m 534\u001b[0m trust_remote_code\u001b[38;5;241m=\u001b[39mtrust_remote_code,\n\u001b[1;32m 535\u001b[0m code_revision\u001b[38;5;241m=\u001b[39mcode_revision,\n\u001b[1;32m 536\u001b[0m _commit_hash\u001b[38;5;241m=\u001b[39mcommit_hash,\n\u001b[1;32m 537\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mhub_kwargs,\n\u001b[1;32m 538\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 539\u001b[0m )\n\u001b[1;32m 541\u001b[0m \u001b[38;5;66;03m# if torch_dtype=auto was passed here, ensure to pass it on\u001b[39;00m\n\u001b[1;32m 542\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs_orig\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtorch_dtype\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;241m==\u001b[39m 
\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/transformers/models/auto/configuration_auto.py:1125\u001b[0m, in \u001b[0;36mAutoConfig.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, **kwargs)\u001b[0m\n\u001b[1;32m 1114\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m:\n\u001b[1;32m 1115\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 1116\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe checkpoint you are trying to load has model type `\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mconfig_dict[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m` \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1117\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbut Transformers does not recognize this architecture. This could be because of an \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1123\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`pip install git+https://github.com/huggingface/transformers.git`\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 1124\u001b[0m )\n\u001b[0;32m-> 1125\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m config_class\u001b[38;5;241m.\u001b[39mfrom_dict(config_dict, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39munused_kwargs)\n\u001b[1;32m 1126\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1127\u001b[0m \u001b[38;5;66;03m# Fallback: use pattern matching on the string.\u001b[39;00m\n\u001b[1;32m 1128\u001b[0m \u001b[38;5;66;03m# We go from longer names to shorter names to catch roberta before bert (for instance)\u001b[39;00m\n\u001b[1;32m 1129\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m pattern \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28msorted\u001b[39m(CONFIG_MAPPING\u001b[38;5;241m.\u001b[39mkeys(), 
key\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mlen\u001b[39m, reverse\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/transformers/models/timm_wrapper/configuration_timm_wrapper.py:106\u001b[0m, in \u001b[0;36mTimmWrapperConfig.from_dict\u001b[0;34m(cls, config_dict, **kwargs)\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpretrained_cfg\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_classes\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpretrained_cfg\u001b[39m\u001b[38;5;124m\"\u001b[39m]:\n\u001b[1;32m 104\u001b[0m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpretrained_cfg\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnum_classes\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[0;32m--> 106\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mfrom_dict(config_dict, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/transformers/configuration_utils.py:765\u001b[0m, in \u001b[0;36mPretrainedConfig.from_dict\u001b[0;34m(cls, config_dict, **kwargs)\u001b[0m\n\u001b[1;32m 763\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(current_attr, PretrainedConfig) \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(value, \u001b[38;5;28mdict\u001b[39m):\n\u001b[1;32m 764\u001b[0m value \u001b[38;5;241m=\u001b[39m current_attr\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mvalue)\n\u001b[0;32m--> 765\u001b[0m \u001b[38;5;28msetattr\u001b[39m(config, key, value)\n\u001b[1;32m 766\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m key \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtorch_dtype\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[1;32m 767\u001b[0m to_remove\u001b[38;5;241m.\u001b[39mappend(key)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/transformers/configuration_utils.py:209\u001b[0m, in \u001b[0;36mPretrainedConfig.__setattr__\u001b[0;34m(self, key, value)\u001b[0m\n\u001b[1;32m 207\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m key \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__getattribute__\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mattribute_map\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 208\u001b[0m key \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__getattribute__\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mattribute_map\u001b[39m\u001b[38;5;124m\"\u001b[39m)[key]\n\u001b[0;32m--> 209\u001b[0m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__setattr__\u001b[39m(key, value)\n",
"File \u001b[0;32m~/.local/lib/python3.12/site-packages/transformers/configuration_utils.py:355\u001b[0m, in \u001b[0;36mPretrainedConfig.num_labels\u001b[0;34m(self, num_labels)\u001b[0m\n\u001b[1;32m 352\u001b[0m \u001b[38;5;129m@num_labels\u001b[39m\u001b[38;5;241m.\u001b[39msetter\n\u001b[1;32m 353\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mnum_labels\u001b[39m(\u001b[38;5;28mself\u001b[39m, num_labels: \u001b[38;5;28mint\u001b[39m):\n\u001b[1;32m 354\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mid2label\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid2label \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid2label) \u001b[38;5;241m!=\u001b[39m num_labels:\n\u001b[0;32m--> 355\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid2label \u001b[38;5;241m=\u001b[39m {i: \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLABEL_\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mi\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(num_labels)}\n\u001b[1;32m 356\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabel2id \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mdict\u001b[39m(\u001b[38;5;28mzip\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid2label\u001b[38;5;241m.\u001b[39mvalues(), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid2label\u001b[38;5;241m.\u001b[39mkeys()))\n",
"\u001b[0;31mTypeError\u001b[0m: 'NoneType' object cannot be interpreted as an integer"
]
}
],
"source": [
    "from transformers import AutoModel, AutoImageProcessor\n",
    "\n",
    "# Virchow2 is distributed as a timm checkpoint; loading it through\n",
    "# transformers' AutoModel currently fails inside TimmWrapperConfig\n",
    "# (the num_labels setter receives None -> TypeError, as recorded in this\n",
    "# cell's traceback). The failure itself is the demonstration, so catch\n",
    "# and display it; the next cell loads the model the supported way, via timm.\n",
    "try:\n",
    "    model = AutoModel.from_pretrained(\"paige-ai/Virchow2\")\n",
    "    processor = AutoImageProcessor.from_pretrained(\"paige-ai/Virchow2\")\n",
    "except TypeError as err:\n",
    "    print(f\"AutoModel loading failed: {err}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "173d0ea6",
"metadata": {},
"outputs": [],
"source": [
    "import timm\n",
    "import torch\n",
    "from timm.data import resolve_data_config\n",
    "from timm.data.transforms_factory import create_transform\n",
    "from timm.layers import SwiGLUPacked\n",
    "from PIL import Image\n",
    "\n",
    "# Query image -- hardcoded absolute path is machine-specific; kept as a\n",
    "# named constant at the top of the cell so it is easy to find and change.\n",
    "IMAGE_PATH = \"/home/ubuntu/vnet/TaoST/Brand.jpg\"\n",
    "\n",
    "# need to specify MLP layer and activation function for proper init\n",
    "model = timm.create_model(\n",
    "    \"hf-hub:paige-ai/Virchow2\",\n",
    "    pretrained=True,\n",
    "    mlp_layer=SwiGLUPacked,\n",
    "    act_layer=torch.nn.SiLU,\n",
    ")\n",
    "model = model.eval()\n",
    "\n",
    "# Build preprocessing from the checkpoint's own pretrained config.\n",
    "transforms = create_transform(**resolve_data_config(model.pretrained_cfg, model=model))\n",
    "\n",
    "# .convert(\"RGB\") guards against grayscale/RGBA/palette inputs that would\n",
    "# otherwise break the 3-channel normalization in the transform.\n",
    "image = Image.open(IMAGE_PATH).convert(\"RGB\")\n",
    "image = transforms(image).unsqueeze(0)  # size: 1 x 3 x 224 x 224\n",
    "\n",
    "# inference_mode() skips autograd bookkeeping -- less memory, faster forward.\n",
    "with torch.inference_mode():\n",
    "    output = model(image)  # size: 1 x 261 x 1280\n",
    "\n",
    "class_token = output[:, 0]    # size: 1 x 1280\n",
    "patch_tokens = output[:, 5:]  # size: 1 x 256 x 1280, tokens 1-4 are register tokens so we ignore those\n",
    "\n",
    "# concatenate class token and average pool of patch tokens\n",
    "embedding = torch.cat([class_token, patch_tokens.mean(1)], dim=-1)  # size: 1 x 2560"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|