{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "TuSjQA1mLSYU" }, "source": [ "# Chapter 9 - Video-Language Models" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "a14ycJDJLi68", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "302afd2e-8b46-4e84-fbca-cbce0ec731a8" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/10.4 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.7/10.4 MB\u001b[0m \u001b[31m80.9 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m10.4/10.4 MB\u001b[0m \u001b[31m198.4 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.4/10.4 MB\u001b[0m \u001b[31m120.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.6/13.6 MB\u001b[0m \u001b[31m179.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.2/41.2 MB\u001b[0m \u001b[31m76.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m163.5/163.5 kB\u001b[0m \u001b[31m22.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m23.8/23.8 MB\u001b[0m \u001b[31m143.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m520.7/520.7 kB\u001b[0m \u001b[31m58.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.7/60.7 MB\u001b[0m \u001b[31m46.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m47.6/47.6 MB\u001b[0m \u001b[31m64.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Building wheel for docopt (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n" ] } ], "source": [ "# Here some installs that we will be using in multiple parts of the chapter\n", "!pip -q install -U transformers==5.2.0\n", "!pip -q install -U torchcodec huggingface_hub\n", "!pip -q install -U decord av qwen_vl_utils num2words faiss-cpu datasets peft bitsandbytes" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331, "referenced_widgets": [ "48135d5683db40d5ac96bfe3a94d1597", "534edd2081034b2da978cf69e6ad59f1", "de74a555d6cc43049d28d137247e6a05", "f03d9d66c30b4652af74cf85f4fbe5ad", "38084b19d0c94c4bb67bb2ca4f8e00db", "28da51371045497cb3fa7e17017bcb10", "1a7043d1937b481c9ab944cfa81224d0", "c6339eb79877444ba033d7687ef16f8d", "2da12a658e914db49610a9c1c73ee8b5", "15031c1be3504e7e93297acf0e56354c", "c6080d5977e942209b43bd5e91edefac", "5e79580ee0ad455bbd92aab866cb3b51", "3363fd43f7e84defb71938a8dbceb572", "394323de55a144f681164de44b808ffd", "1626da0d92ec428e87f45f2eaf7dff3f", "121161d5494e40cfb9e6fad70ca2c4c3", "6b9ad4ec9ae747d6b2be9a549a786f83" ] }, "id": "1ds0adbXMC5l", "outputId": "ec946830-eaf7-4731-e2d1-680ba03beb70" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "48135d5683db40d5ac96bfe3a94d1597", "version_major": 2, "version_minor": 0 }, "text/plain": [ "VBox(children=(HTML(value='
| Step | \n", "Training Loss | \n", "
|---|---|
| 10 | \n", "1.519478 | \n", "
| 20 | \n", "0.856389 | \n", "
| 30 | \n", "0.745439 | \n", "
| 40 | \n", "0.714947 | \n", "
| 50 | \n", "0.736302 | \n", "
| 60 | \n", "0.717740 | \n", "
"
]
},
"metadata": {}
}
],
"source": [
"def collate(batch):\n",
"    # Build one padded, tokenized training batch from raw dataset rows.\n",
"    # Each valid row becomes a two-turn conversation: user turn carrying the\n",
"    # video path + question, assistant turn carrying the reference answer.\n",
"    # NOTE(review): `tok` must be a processor whose chat template understands\n",
"    # {\"type\": \"video\", \"path\": ...} content entries -- confirm against the\n",
"    # cell where `tok` is created.\n",
"    all_conversations = []\n",
"    for ex in batch:\n",
"        # Skip rows that are missing any field the conversation needs.\n",
"        if \"video link\" not in ex or \"text prompt\" not in ex or \"conversations\" not in ex or len(ex[\"conversations\"]) < 2:\n",
"            print(f\"Skipping example due to missing or incomplete data: {ex.get('id', 'N/A')}\")\n",
"            continue\n",
"\n",
"        video_path = ex[\"video link\"]\n",
"        question = ex[\"text prompt\"]\n",
"        answer = ex[\"conversations\"][1][\"value\"]\n",
"\n",
"        # One conversation = list of message dicts for this example.\n",
"        conversation = [\n",
"            {\"role\":\"user\",\"content\":[\n",
"                {\"type\":\"video\",\"path\":video_path},\n",
"                {\"type\":\"text\",\"text\":question}]},\n",
"            {\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":answer}]}\n",
"        ]\n",
"        all_conversations.append(conversation)\n",
"\n",
"    if not all_conversations:\n",
"        # No usable rows in this batch: return empty tensors so the step is a no-op.\n",
"        return {\"input_ids\": torch.tensor([]), \"attention_mask\": torch.tensor([]), \"labels\": torch.tensor([])}\n",
"\n",
"    toks = tok.apply_chat_template(\n",
"        all_conversations,  # List[List[Dict]] -> one padded sequence per conversation\n",
"        tokenize=True,\n",
"        padding=True,\n",
"        return_dict=True,\n",
"        return_tensors=\"pt\"\n",
"    )\n",
"    # Labels must NOT alias input_ids, and padding positions must be masked\n",
"    # with -100 (CrossEntropyLoss ignore_index) so pad tokens contribute no loss.\n",
"    labels = toks[\"input_ids\"].clone()\n",
"    labels[toks[\"attention_mask\"] == 0] = -100\n",
"    return {\"input_ids\": toks[\"input_ids\"],\n",
"            \"attention_mask\": toks[\"attention_mask\"],\n",
"            \"labels\": labels}\n",
"\n",
"args = TrainingArguments(\n",
"    your_model_folder, per_device_train_batch_size=4,\n",
"    gradient_accumulation_steps=8, num_train_epochs=1, fp16=True,\n",
"    learning_rate=2e-4, lr_scheduler_type=\"cosine\", warmup_ratio=0.03,\n",
"    logging_steps=10, save_total_limit=2,\n",
"    remove_unused_columns=False  # keep raw columns so collate() can read them\n",
")\n",
"\n",
"Trainer(model=model, train_dataset=train_ds,\n",
"        data_collator=collate, args=args).train()\n",
"model.save_pretrained(your_model_folder)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 248,
"referenced_widgets": [
"65d0a085c32e410bae5e889086673183",
"5c5e7f4c4c274435b7c8200fed3c2af4",
"7a94948e9d5a48ceb88b5ed26c1566bd",
"226e267c66cc4232bf11b4c62d25fab4",
"45b1a2a5225e4db2bb65e19da9129b64",
"1b51f45b0f2746188d372442a8aafa11",
"3ecd7dd20188419bbcd0855404fe228d",
"b92ac54e7848482d932d731474ab26a2",
"b699399a76b94dbea7e04d5e852d4f29",
"cf8687b565a644b59bcab7ea1c3e16b3",
"5c61a55db945430b80dd2ce7f0df5ac8",
"bb8062a9db0c42c4b8c6f88061130c57",
"87ceff8f62d9468da6cf505d0d6aae93",
"ec7db76cac3b4e8a81784c440a14c247",
"525399528bc54c0b928537fcb81bf4eb",
"da3ca8489cb34c779476dfbcfcfd52c3",
"88e8e360823b4901a04adafb0e352b5b",
"231903c76fd241b89d104300433975cb",
"ff5f0355efe1428c956308173060969c",
"2a6a5d92c0a348c0aaca8bbf664912bb",
"94379112ee5e43e898351005014e0202",
"774cdebff3ef4b69bc62b97c5caa3e98",
"21945612ba4c40ec943d33f77794bfd7",
"30b0859dacf5452fb61beb704183d0e5",
"7be39816f81c410eb3899ebf3d565578",
"8810aaf657834f168d798e47d170bea3",
"387acec5474a4d9393ad0d048d2ff6d9",
"5a1bd3d9d6d64dee95d789bc30d82ae3",
"dc023689144140f99298e9559860c5e6",
"ffbd616e705f4b16b8df7f4589286f56",
"0527cdbce0a24dae825cb7a95fe45d81",
"030a44142d4a4815a15fa1a9cd0b4f3b",
"ad4de7ddc58a4b02bb34e99ed6a484bf",
"a854a0c760be4f85a2646f20c56a7d11",
"b2188da2c959467b9333be65d3c4f263",
"12f6065989064957b3b8a3f877b149e7",
"350b69b057e346f4adaf3c90ece3dcda",
"39421f248cfc4dd1938c7e51408dd96d",
"14d03b195ef44c2cba7f058b61260e33",
"7a545fb3a497431ca0662fe48a5243ea",
"97580f89e83d43fe8a712a92bdeb6d11",
"81697acb646149bb865855b5d4fd4250",
"c353da6828f54a148da7440cf22c4fcf",
"e2d9018b108048b78fb8da3b3e338f52",
"6ea62c47df6d4e1cab0a39edadd7cb9f",
"02ba170612d54afda9aba4b69b6c46b9",
"99099eb02ff04ad98d79e5a8d6d222f3",
"a1be81293007477bacc8c028df29e477",
"c28169a69b00452d9b68a81823f6bce7",
"8eceb5299c7748d694928f6ce7f4b99c",
"fa2520ffe9c94bfe835a2d3df4b17bf7",
"58628974056e4a099ac9efe67cbdba9e",
"421512854f39478ba5fb9a05a524fa9e",
"b0feb8aa77164ab2ab6e300b69acd58a",
"5f2a971e42314282b3581bff7af699a3"
]
},
"id": "CrLdgBpda4oE",
"outputId": "efcc2d40-db3f-4174-c3da-5f38b0566ba8"
},
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"`torch_dtype` is deprecated! Use `dtype` instead!\n"
]
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"Loading weights: 0%| | 0/489 [00:00, ?it/s]"
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "65d0a085c32e410bae5e889086673183"
}
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"README.md: 0.00B [00:00, ?B/s]"
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "bb8062a9db0c42c4b8c6f88061130c57"
}
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"Processing Files (0 / 0) : | | 0.00B / 0.00B "
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "21945612ba4c40ec943d33f77794bfd7"
}
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"New Data Upload : | | 0.00B / 0.00B "
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "a854a0c760be4f85a2646f20c56a7d11"
}
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
" ...adapter_model.safetensors: 3%|3 | 639kB / 20.0MB "
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "6ea62c47df6d4e1cab0a39edadd7cb9f"
}
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"CommitInfo(commit_url='https://huggingface.co/mfarre/smolvlm2-vide-qlora-adapter/commit/3d9f416b721ba01ca0e2fa978114f705b053191a', commit_message='Upload model', commit_description='', oid='3d9f416b721ba01ca0e2fa978114f705b053191a', pr_url=None, repo_url=RepoUrl('https://huggingface.co/mfarre/smolvlm2-vide-qlora-adapter', endpoint='https://huggingface.co', repo_type='model', repo_id='mfarre/smolvlm2-vide-qlora-adapter'), pr_revision=None, pr_num=None)"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
}
},
"metadata": {},
"execution_count": 24
}
],
"source": [
"# We can load the adapter that way:\n",
"your_model_repo = '