Upload main.ipynb with huggingface_hub
Browse files — main.ipynb (+7 −0)
main.ipynb
CHANGED
|
@@ -915,6 +915,13 @@
|
|
| 915 |
"del ckpt # Free memory\n",
|
| 916 |
"torch.cuda.empty_cache()\n",
|
| 917 |
"\n",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 918 |
"print(f\"\\nReady to resume training from step {resume_step}\")\n",
|
| 919 |
"print(f\"Or skip to generation cells to use the model!\")\n"
|
| 920 |
]
|
|
|
|
| 915 |
"del ckpt # Free memory\n",
|
| 916 |
"torch.cuda.empty_cache()\n",
|
| 917 |
"\n",
|
| 918 |
+
"# Set up DataParallel if multiple GPUs available\n",
|
| 919 |
+
"if torch.cuda.device_count() > 1:\n",
|
| 920 |
+
" model_dp = nn.DataParallel(model_unwrapped, device_ids=[0, 1], output_device=0)\n",
|
| 921 |
+
" print(f\"\\nUsing {torch.cuda.device_count()} GPUs with DataParallel!\")\n",
|
| 922 |
+
"else:\n",
|
| 923 |
+
" model_dp = model_unwrapped\n",
|
| 924 |
+
"\n",
|
| 925 |
"print(f\"\\nReady to resume training from step {resume_step}\")\n",
|
| 926 |
"print(f\"Or skip to generation cells to use the model!\")\n"
|
| 927 |
]
|