{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "TuSjQA1mLSYU" }, "source": [ "# Chapter 9 - Video-Language Models" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "a14ycJDJLi68", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "302afd2e-8b46-4e84-fbca-cbce0ec731a8" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/10.4 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.7/10.4 MB\u001b[0m \u001b[31m80.9 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m10.4/10.4 MB\u001b[0m \u001b[31m198.4 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.4/10.4 MB\u001b[0m \u001b[31m120.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.6/13.6 MB\u001b[0m \u001b[31m179.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.2/41.2 MB\u001b[0m \u001b[31m76.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m163.5/163.5 kB\u001b[0m \u001b[31m22.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m23.8/23.8 MB\u001b[0m \u001b[31m143.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m520.7/520.7 kB\u001b[0m \u001b[31m58.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.7/60.7 MB\u001b[0m \u001b[31m46.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m47.6/47.6 MB\u001b[0m \u001b[31m64.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Building wheel for docopt (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n" ] } ], "source": [ "# Here are some installs that we will use in multiple parts of the chapter\n", "!pip -q install -U transformers==5.2.0\n", "!pip -q install -U torchcodec huggingface_hub\n", "!pip -q install -U decord av qwen_vl_utils num2words faiss-cpu datasets peft bitsandbytes" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331, "referenced_widgets": [ "48135d5683db40d5ac96bfe3a94d1597", "534edd2081034b2da978cf69e6ad59f1", "de74a555d6cc43049d28d137247e6a05", "f03d9d66c30b4652af74cf85f4fbe5ad", "38084b19d0c94c4bb67bb2ca4f8e00db", "28da51371045497cb3fa7e17017bcb10", "1a7043d1937b481c9ab944cfa81224d0", "c6339eb79877444ba033d7687ef16f8d", "2da12a658e914db49610a9c1c73ee8b5", "15031c1be3504e7e93297acf0e56354c", "c6080d5977e942209b43bd5e91edefac", "5e79580ee0ad455bbd92aab866cb3b51", "3363fd43f7e84defb71938a8dbceb572", "394323de55a144f681164de44b808ffd", "1626da0d92ec428e87f45f2eaf7dff3f", "121161d5494e40cfb9e6fad70ca2c4c3", "6b9ad4ec9ae747d6b2be9a549a786f83" ] }, "id": "1ds0adbXMC5l", "outputId": "ec946830-eaf7-4731-e2d1-680ba03beb70" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "48135d5683db40d5ac96bfe3a94d1597", "version_major": 2, "version_minor": 0 }, "text/plain": [ "VBox(children=(HTML(value='
str:\n", " messages = [{\n", " \"role\": \"user\",\n", " \"content\": [\n", " {\"type\": \"video\", \"path\": video_path},\n", " {\"type\": \"text\", \"text\": prompt},\n", " ],\n", " }]\n", " inputs = processor.apply_chat_template(\n", " messages,\n", " tokenize=True,\n", " add_generation_prompt=True,\n", " return_dict=True,\n", " return_tensors=\"pt\",\n", " do_sample_frames=True,\n", " num_frames=num_frames,\n", " ).to(model.device)\n", " if \"pixel_values_videos\" in inputs:\n", " inputs[\"pixel_values_videos\"] = inputs[\"pixel_values_videos\"].to(dtype)\n", " with torch.no_grad():\n", " out = model.generate(**inputs, max_new_tokens=128, do_sample=False)\n", "\n", " prompt_len = inputs[\"input_ids\"].shape[-1]\n", " return processor.batch_decode(\n", " out[:, prompt_len:], skip_special_tokens=True\n", " )[0].strip()\n", "\n", "\n", "# Captioning\n", "print(\"CAPTION:\\n\", ask_video(minecraft_mp4, \"Write a one-sentence caption for this video.\"))\n", "\n", "# Summarization\n", "print(\"\\nSUMMARY:\\n\", ask_video(minecraft_mp4, \"Summarize the video in 3 bullet points.\"))\n", "\n", "# Video QA\n", "print(\"\\nQA:\\n\", ask_video(spaghetti_mp4, \"What is the person doing? Answer briefly.\"))" ] }, { "cell_type": "markdown", "metadata": { "id": "atoJoRd0G3zh" }, "source": [ "# 9.3.2 Retrieval Pipelines That Scale\n" ] }, { "cell_type": "markdown", "metadata": { "id": "bNp8dxIIyqva" }, "source": [ "### Step 1: Segment videos\n", "We prepare a function to split videos in small segments" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "oy09p744G46H" }, "outputs": [], "source": [ "def segment_video(\n", " video_path: str, out_dir: str, segment_seconds: int = 5\n", ") -> List[str]:\n", " \"\"\"\n", " Split a video into fixed-length MP4 segments using ffmpeg.\n", " Returns sorted list of segment file paths.\n", " \"\"\"\n", " out_dir_path = Path(out_dir).resolve()\n", " out_dir_path.mkdir(parents=True, exist_ok=True)\n", " pattern = str(out_dir_path / \"seg_%05d.mp4\")\n", "\n", " subprocess.run(\n", " [\n", " \"ffmpeg\", \"-y\",\n", " \"-i\", video_path,\n", " \"-c\", \"copy\", # stream copy — fast, no re-encoding\n", " \"-map\", \"0\",\n", " \"-f\", \"segment\",\n", " \"-segment_time\", str(segment_seconds),\n", " \"-reset_timestamps\", \"1\",\n", " pattern,\n", " ],\n", " check=True,\n", " stdout=subprocess.DEVNULL,\n", " stderr=subprocess.DEVNULL,\n", " )\n", " # Return absolute paths for the segments\n", " return sorted(str(p.resolve()) for p in out_dir_path.glob(\"seg_*.mp4\"))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ "39f6a6eb689f4262af439b375d5b2abb", "d0b891c5fae14455b41eb10ffdedc936", "a52a8aa9f3054101a9ba258d38d803e7", "9e5ba30d525845279fddde78d88c47f2", "9843fd65e959478294017a5c0acbdef4", "1e3779dea0464aec858d841e2ddb429d", "8ca41427f84c4d4395c14432eb92fc2f", "427252bbdc524c84a98f2353074136a5", "f1805253451342ce90a0d8afc25208af", "1c1fbace15a046de89bf42a4f0167028", "cfd79a9d5caf4bf4bef40810b7ee691b" ] }, "id": "DxO03PaTy91Q", "outputId": "a74fb9db-c300-4166-d4c2-ec12efa9822e" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "Loading weights: 0%| | 0/625 [00:00 np.ndarray:\n", " \"\"\"Embed a text query. 
Returns (D,) numpy array, L2-normalized.\"\"\"\n", " emb = embedder.process([{\"text\": text}])\n", " return emb[0].detach().cpu().float().numpy()\n", "\n", "def embed_video_segment(path: str, fps: float = 1.0, max_frames: int = 32) -> np.ndarray:\n", " \"\"\"Embed a video segment. Returns (D,) numpy array, L2-normalized.\"\"\"\n", " emb = embedder.process([{\"video\": path, \"fps\": fps, \"max_frames\": max_frames}])\n", " return emb[0].detach().cpu().float().numpy()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "iQu8FLq3z-eg", "outputId": "ab0448fd-e60f-46a6-f9b6-16e3d3837c05" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Total segments: 11\n" ] } ], "source": [ "\n", "#Segment, embed, and index our demo videos\n", "\n", "all_segments: List[str] = []\n", "segment_meta: List[Dict] = []\n", "\n", "for vid in [spaghetti_mp4, minecraft_mp4]:\n", " out_dir = Path(\"video_segments\") / Path(vid).stem\n", " segs = segment_video(vid, str(out_dir), segment_seconds=5)\n", " for seg in segs:\n", " all_segments.append(seg)\n", " segment_meta.append({\"source_video\": vid, \"segment_path\": seg})\n", "\n", "print(f\"Total segments: {len(all_segments)}\")\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "cExp68Pt0ejE" }, "outputs": [], "source": [ "\n", "# Embed all segments (offline step — do this once)\n", "vectors = np.stack(\n", " [embed_video_segment(seg) for seg in all_segments], axis=0\n", ") # (N, D)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "4PtNc1CL0hJx" }, "outputs": [], "source": [ "\n", "# Build FAISS index. Since embeddings are L2-normalized,\n", "# inner product (IndexFlatIP) equals cosine similarity.\n", "index = faiss.IndexFlatIP(vectors.shape[1])\n", "index.add(vectors.astype(np.float32))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "nWItL6yE0ixr", "outputId": "fb8a19b6-730a-4064-f5b1-fe4646179022" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "Query: someone eating pasta\n", " score=0.729 seg=/content/video_segments/eating_spaghetti/seg_00000.mp4\n", " score=0.507 seg=/content/video_segments/eating_spaghetti/seg_00001.mp4\n", " score=0.329 seg=/content/video_segments/09KmKSz4r_Y/seg_00003.mp4\n", "\n", "Query: a videogame\n", " score=0.600 seg=/content/video_segments/09KmKSz4r_Y/seg_00003.mp4\n", " score=0.570 seg=/content/video_segments/09KmKSz4r_Y/seg_00006.mp4\n", " score=0.562 seg=/content/video_segments/09KmKSz4r_Y/seg_00005.mp4\n" ] } ], "source": [ "\n", "# Query function\n", "def search_segments(query: str, k: int = 5) -> List[Tuple[float, Dict]]:\n", " \"\"\"Retrieve top-k segments for a text query.\"\"\"\n", " q = embed_text(query)[None, :] # (1, D)\n", " scores, ids = index.search(q.astype(np.float32), k)\n", " return [\n", " (float(s), segment_meta[i])\n", " for s, i in zip(scores[0], ids[0])\n", " if i >= 0\n", " ]\n", "\n", "# Try it\n", "for q in [\"someone eating pasta\", \"a videogame\"]:\n", " print(f\"\\nQuery: {q}\")\n", " for score, meta in search_segments(q, k=3):\n", " print(f\" score={score:.3f} seg={meta['segment_path']}\")" ] }, { "cell_type": "markdown", "metadata": { "id": "YPuk-vfbchX3" }, "source": [ "### Optional: ReRank" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 465, 
"referenced_widgets": [ "bc5ca56ae39a4824bb95c607026076da", "c03e2bddeda34398bb5e4ea7818549ff", "a3b75ff09bae469bba9c2e1e20e419fc", "b1137e4a767847698905b23be25f346a", "e6e274b1060e48fb944e2e39ab815aad", "fc0eed5efad54e99a9647b1a11b3243e", "eada1b3612014e86b24445d10afc263a", "6fb6b35c26e44b2b9116fbfdbd29381c", "d704143dc56c4a1bb8b38d64cf78df1f", "caf3438a5ce64a51bab97b72c9bb329c", "040ab494cad244dab4c5f727aba122bd", "817bef6866884bc8ab997aa4b2280661", "044e351b330348da97bf68f5582b316f", "1e9e638412b8432883e428f902265e8d", "e1be340af5e349dd862523133f8c9800", "964939558f194f828efe75d070aa1ca6", "f9f0bb0ddada4180b557c030745ec089", "f8b96bc6922042079b73899616ea08e3", "5e708a5c44ed4311a3db2cd0edd8bdf8", "6b462a9720af4331b9c30a2bffc3fa10", "e244b8451d71495eafc0b02d5c51f3c2", "b54517f658e54be48160c33744336b98", "40f6c04d250349b994bdfcfc0651a4e8", "01058dd6f37144199362fde0a2cd913c", "a9e8a9f4690e4db5abaf3847e677b12b", "e7c5f48a50a3496288c454ee6f91fb3b", "90903de228ba43f788938abab945eada", "c579ae966cfc407ebc441d99a9bbe66d", "5050b7f18c004ce6b7b64ccc2494bc6c", "59270d06591d4169afd71290d6a58dc6", "076fe8c214b9455c804451dd3ec4080e", "8e6929fb7a394363b91e53222089238c", "adbb2e6e8754401bbea23375d1e80dd9", "f7e61959e1f14ac0aed243ef50ae0e3c", "b59ccc27219443fe90ff733a4f875eca", "7577650b47714b10a9b84d5d22505cba", "67bc3ae93e034a60aca156141768c8be", "2d6c8a410a804d96a7be8e7751b3a5ac", "51f69db1640c4bca927690a7a78b39cf", "d179ea186d214bb3abf0e4f1fdc8a8d9", "0a1d7b91e1d747248c16d351e29f8f98", "653f4394eef447c9b8152ba0fe0f90b0", "fe3d548547834be1a5714d91cc236705", "dd778ded9fee4817bbf324a24a054bb0", "e8568139de7f4446b00b0806afc0e99a", "f6987cefb33f434593c3f194eb43aa23", "4d4b9756bafc42d4813ba13a331e40eb", "41a388b09a9f4473a393a883e0741ee4", "a4010d3d132848ae8a264756f794ea12", "76bfb788fdc84d9aa0d5ad4b0e412af0", "b6e028312dc149599a6c4bb2e2cf787b", "dbf32ff7227542a3831c85616e01d777", "ebc14ab3ec7d4aaab4def8f821329de0", "2e16b09298074eea8db303cc61b6be9c", "80b01d8e4c664914854576d3d2bf2420", "50903c8c7cd848a7b79a4bff1951c592", "1b83f7cd00b64a04ac20a90d05a629a2", "df2a75ad2f1848b7990e50ca21b52a8a", "257d328ef5994c2c9b5c51132d967845", "8d7894fd93af4327bfda96d0b07fb540", "6d09abcb63d9400e8345c7b54efbf60c", "642a54316d0f46b886179a2385716975", "07eb92a3a9ac4d26b0bff32f1563f2f5", "dd7fc3e99b6b4c13a8f6449fb21119e5", "edfb9e44859c48789df13802b054ef08", "fb701fe45bfb4efb8779ffd24bbf13e2", "7d61f693c2b244b882f248067d337c7d", "491ccfdde29a46ec86b6e55b852bb858", "c1a9822c0b7344ea984c77f7d95a9e5a", "e3ef30b1dc594658999b5de9e0b89c2f", "174866dfd1fa4c56b4aa055d52bd4255", "e742b7edba1246ca9c203a9a8d23cc90", "ad4e7a83fef74eaf92ee49bd072c5f64", "16820a26828545d2aab10f873ee41f41", "4e2637d6e902458d9d2fecba39633def", "18fc74aa20bf4723994be04bd8d6ad95", "82712102bb814809a183a177f83f3714", "ddf58b8debf145b0bc1f9966551b0d35", "0d9602402e514b5694faab0d583a1103", "7a1ad78a592b404386dcb7fd098c036c", "382dab58c01d40afbdc75dbee9b9f5d6", "205e14de3b1e49ef85e9331e7239ad50", "a9103cf1307a468bb15ac3aed3f937c8", "6497c9ee64c44e1faa9d26c9fdf9add6", "17368607eb2a4dd7a65cf35dde6275fe", "73da81c1ad87469fbf09960d738ccebe", "15719579d5d04f78aef74b6f7f0102e3", "ebe16cad608749d5a9959320ae614464", "01cad5bb781a4fb88ecf0ebd34be2090", "a7309066a2244ce2ad756d0a1634e2d2", "6d90024bce5c4d218e3e2e0df7bf9e73", "616f73bb531c43879f631eea22b32c8b", "f13b05ca20d94607ac2cc25e5444c9c5", "30d19bb8d84a4c37902ddae688416c2d", "d97aa9a0507e4b9c8cc33d794bf631b7", "9c4185e304a84927b68583ab666a8b2e", "8f2308bc07ba4933ad1409b14f1cf19e", "e93270fb208949d09471ead75d19da35", 
"cb44d9e393fc4c7597660e58271a096f", "0b9a125c486447ef8215c89c7e006b01", "cd5c094ce28442fbb946a920a5338a04", "29e12bad77e3491e804e90a2ea8db6ad", "3af51c4900d945738e3d790ee01e0c70", "444e328be51d4665aa5c5b954c952e98", "79fc4025f59c49ccb39558d980944d40", "24077c6c1ec84cffa259f4eaedcae53e", "458e409f95254834872131f21f2a95f4", "b1120a88ad5c41f3b5fed72e4dbbd2d6", "60073b3fe7364c77bef3edd0c01a9ff7", "7b897d5ef0f44f66a2d498c7d48c5c91", "483dde24d0a14a9684c83188ed0b26e6", "a17f173eecbe4563b22b15c703aea423", "82d331ee31d547bb86298722aaaaa189", "6a6a3d9088a344c6b1813fbbcf46326d", "d1d0328a004142479664d7a58f9e3d50", "89248263858b40d29baf681bb73f5df7", "579d4aaae94c4bc6866e63c3b620c23e", "833514a28e244f7484b2c9c4f05b155e", "ae8d03a03dd3473aa4fb22bb830a4b31", "7dddf56c3355415cb0e90c5ffc69326e", "538d9cc0f3e442a89906a834000f62de", "14c037b7cf8a42f492a4a48a8aa0c82e", "b56ea77a901e495cb5e937d390c78953", "87290d60ad424ca79efa838da61a76f3", "315e2e858aa5475d83e02c98df719540", "8e673a8527374f889da61e25c6fc6771", "34f6d51f814d4fa590ea49fbcd2d5a54", "aeea997163f84575a8060782c7c0fde6", "5dbbf8f5aca64eceb862ba27f4d63200", "d01dc85668de41d7bc0eb62d184e9334", "5d41bb25cb7740b2b3361fc50322ad39", "9411fa1807ef45b08b773aee689dde4f", "d9e2038e8f674e7783ba3a853abb4fd2", "c8fb44359ab64601976ce4e5d494a620", "ec63e39ef47e4b88a673a33445f78406", "70d7044af4684647a14b5de7b6bc80c1", "72b9fbaa2bfc47e9bfa02768a3294314", "1904248ba7b2493abf37e2681d02670d", "9024111110934c84b0e016bf5cce3562", "af94ae0e623345babd2887da38c46501", "b389a7266fdd485ba2158d614047e03f", "ea250575648a465b83c2c98bd9bca1e1", "ea1a341091404c87b6a4f64deea540a7", "36198a90cd2e42858748265f5d63d095", "3011485e755346c29e81b97405f6ef31", "e11643359d1e4e18803d5fc802671b7b", "f5384bfed6b146cab84773c8beba8a2b", "cd2ecd7cc8c141c6a2b9916b62760e30", "0f8a8605f247476abaa1d9a569c73afd", "45d2caf136ff424db1bc2481a374bafb", "538e2a41ecd740dfb5c0fe91b7808955", "3eed3dcc7fc143f28d0cde5770790026", "9be02c2fb64c48a7b5e19a2490ece474", "fdd8306fa84f4e18a481e84e40a9d78a" ] }, "id": "kCd_u_B9cmZt", "outputId": "ff38088f-c4e6-4b91-a77c-f46d597705d3" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "qwen3_vl_reranker.py: 0.00B [00:00, ?B/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "bc5ca56ae39a4824bb95c607026076da" } }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "config.json: 0.00B [00:00, ?B/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "817bef6866884bc8ab997aa4b2280661" } }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "model.safetensors: 0%| | 0.00/4.26G [00:00 List[Tuple[float, Dict]]:\n", " \"\"\"Re-score retrieved segments with a cross-encoder for higher precision.\"\"\"\n", " payload = {\n", " \"query\": {\"text\": query},\n", " \"documents\": [{\"video\": c[\"segment_path\"]} for c in candidates],\n", " \"fps\": fps,\n", " \"max_frames\": max_frames,\n", " }\n", " scores = reranker.process(payload) # list[float], aligned with candidates\n", " return sorted(zip(scores, candidates), key=lambda x: x[0], reverse=True)\n", "\n", "# Two-stage retrieval\n", "query = \"someone eating pasta\"\n", "\n", "# Stage 1: fast approximate retrieval (embedding + FAISS)\n", "stage1 = search_segments(query, k=10)\n", "candidates = [meta for _, meta in stage1]\n", "\n", "# Stage 2: precise reranking (cross-encoder)\n", "ranked = rerank_segments(query, candidates)\n", "\n", 
"print(f\"Query: {query}\")\n", "print(\"Top reranked segments:\")\n", "for score, meta in ranked[:5]:\n", " print(f\" rerank_score={score:.3f} seg={meta['segment_path']}\")" ] }, { "cell_type": "markdown", "metadata": { "id": "cds6i2Dzc7Ea" }, "source": [ "# 9.3.3 Video-RAG: Retrieval-Augmented Video QA" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ "131d930966834363ac98969ac37e0ce4", "424e1b4da84e4eb9905427e81bc62cfd", "9edfd817bee648968622406b142d65be", "9c633ab349b44720825ae8b0ec20dfdb", "4549b38dfbf047d5859135c126abb90f", "c1593bb526d14e288837711f6fffb9de", "0236662167ca4dc8b815d935e8ec614a", "850736fd37cf44f3951340948bf92ffc", "0ba0bdad221e45c39436778160613561", "c7d473a3920246ea93a88a30226945b5", "f1b10ecef056436193a7beeda6d856a9" ] }, "id": "dRLQDIBadCWh", "outputId": "e718fd34-86a7-4436-bbfc-c3cc968a3987" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "Loading weights: 0%| | 0/489 [00:00 str:\n", " \"\"\"\n", " Feed multiple retrieved video segments plus a question to SmolVLM2.\n", " Each segment becomes a separate {\"type\": \"video\"} entry in the\n", " chat message, so the model sees all of them as context.\n", " \"\"\"\n", " content = [{\"type\": \"video\", \"path\": p} for p in segment_paths]\n", " content.append({\"type\": \"text\", \"text\": question})\n", "\n", " messages = [{\"role\": \"user\", \"content\": content}]\n", "\n", " inputs = rag_processor.apply_chat_template(\n", " messages,\n", " tokenize=True,\n", " add_generation_prompt=True,\n", " return_dict=True,\n", " return_tensors=\"pt\",\n", " do_sample_frames=True,\n", " num_frames=num_frames,\n", " ).to(rag_model.device)\n", "\n", " if \"pixel_values_videos\" in inputs and torch.is_floating_point(\n", " inputs[\"pixel_values_videos\"]\n", " ):\n", " inputs[\"pixel_values_videos\"] = inputs[\"pixel_values_videos\"].to(dtype)\n", "\n", " with torch.no_grad():\n", " out = rag_model.generate(\n", " **inputs, do_sample=False, max_new_tokens=max_new_tokens\n", " )\n", " prompt_len = inputs[\"input_ids\"].shape[-1]\n", " return rag_processor.batch_decode(\n", " out[:, prompt_len:], skip_special_tokens=True\n", " )[0].strip()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 214 }, "id": "sso7OyaBnTe9", "outputId": "2f13a4f9-99e0-42f5-f9c8-1abfaf762184" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Question: Which food do you see in the video?\n", "\n", "Retrieved 5 segments:\n", " score=0.516 /content/video_segments/eating_spaghetti/seg_00001.mp4\n", " score=0.477 /content/video_segments/eating_spaghetti/seg_00000.mp4\n", " score=0.399 /content/video_segments/09KmKSz4r_Y/seg_00003.mp4\n", " score=0.391 /content/video_segments/09KmKSz4r_Y/seg_00006.mp4\n", " score=0.386 /content/video_segments/09KmKSz4r_Y/seg_00007.mp4\n", "\n", "Answer: spaghetti\n" ] }, { "output_type": "execute_result", "data": { "text/plain": [ "'spaghetti'" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 18 } ], "source": [ "# Here is the pipeline - first retrieve and then asnwer\n", "\n", "def video_rag(question: str, k: int = 5) -> str:\n", " \"\"\"\n", " End-to-end Video-RAG: retrieve relevant segments, then generate\n", " a grounded answer using only those segments as context.\n", " \"\"\"\n", " # Step 1: Retrieve (fast — embedding + 
FAISS)\n", " retrieved = search_segments(question, k=k)\n", " segment_paths = [meta[\"segment_path\"] for _, meta in retrieved]\n", "\n", " # Step 2: Generate (expensive — but only over k segments, not the whole library)\n", " prompt = (\n", " \"Answer the question using ONLY the provided video clips. \"\n", " \"If the clips do not contain enough information, say so.\\n\\n\"\n", " f\"Question: {question}\"\n", " )\n", " answer = answer_with_segments(segment_paths, prompt)\n", "\n", " print(f\"Question: {question}\")\n", " print(f\"\\nRetrieved {len(segment_paths)} segments:\")\n", " for score, meta in retrieved:\n", " print(f\" score={score:.3f} {meta['segment_path']}\")\n", " print(f\"\\nAnswer: {answer}\")\n", " return answer\n", "\n", "video_rag(\"Which food do you see in the video?\", k=5)\n" ] }, { "cell_type": "markdown", "metadata": { "id": "dyx_UmhJpNhD" }, "source": [ "## 9.3.4 Fine-Tuning a Video Language Model for Your Domain\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 177, "referenced_widgets": [ "44f65ec249ed41f5a32e1540982a45aa", "1fc11185649448269433f703a96a2d2c", "0e32e2356a234aecb99e8fe8955fc433", "497e3568073249d3b4925656d6212e52", "596db5e6aad24a8698aee7e787b572d3", "82d6a0fbd7e747cdba2597549fe90525", "69cc5862198c4c7d9a568cbf909ea9e4", "f95b0d2eee6d4342b8ad356617e222c8", "32c4534d3e5c47639f3597b5442cdb0d", "e22862d088f746ea9cc548595eb94b72", "4969fd579bf5466991c33a953c110a19", "22016ebd5c774b71b31d3a2e5678b54d", "db5797ccb0644af29abd88840681430a", "f64c1cdf1ca440b3a9026035c43132bf", "85496f2f2dab4baea475bf1932c0e42a", "2cc10c5e43b24068bed9b9b2427f505d", "f5a346e7c0eb4b73aef157eb5af4d9ff", "1914ea4e030f49398f82638455d055bb", "795271815d3f4310910ea2907506b968", "f50b9a888dde42929a125c7a707c9d4b", "bdf21b8bdefe494b9ce26b0f392ae235", "5d4054325922409a8504277eb20850ba", "ba6b1a10b1964cbab94923a2d7324777", "75fc75aac65743fc906a94752f65ecd2", "57434838af5f4b239f3d0fe82c3f6a1d", "d328cd1ae21342d19952c004f747e317", "c9ffac940ace408f999b9673b9c73ed1", "2f9972a275484804af4b13ac82430acd", "767422a469fb47c0b11d518d6a0bb015", "a8bb4e81e2214ae18d138281d7e00276", "37c6512e07a24462a1906bddfeaa2aa0", "bec954fdffed41359683aef59d84d5b7", "6d3ec6efdc6a4784a3f4b163207526a4", "9fee077c9f0c41f88941c621f295466b", "b3bfacc853f34335bc2735f124e8e9cb", "ebbf934dd5734cf7a9aab9cd40c1f11b", "ee35aed827754b30abfb018a2fb34826", "51d09426fc2f454abc4245ea88d4ba14", "bdd09a32c45b41e2a97c89c503bd4075", "492aa74145614bca80585f95fa687c9f", "67cb8d47b5f046e79dbf697f025ef10a", "9b832dc299bc4a0484382789745de9c3", "2f43b2173f3e4f0a924ef40279f3c59a", "eb881852888d45ab996461591f87b098", "a069126bb2104ef7bd6b87302f55b265", "495a6938fc3d46e899c16b498684b7cc", "e3762aed113b4653a55e2b63d248c6be", "00504b6655c34e3b9b540a80d8928ac8", "f1bba53cb0e34444a8c77b233c296204", "03390e62a447431d9d9d0100df026d50", "735f1cea9a40430f8b838155073717af", "606053abbd1a412fb420498937d3c33b", "15f1c644b2fc41bbb09fa971215fb01d", "53836ca3bd4a495f9754031f60999440", "e6710a5b919540918f84392e04a54114" ] }, "id": "JpNm216qqmkH", "outputId": "5bd6e9e7-44cb-4419-ebfc-945deb35c1b6" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "README.md: 0.00B [00:00, ?B/s]" ], "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, "model_id": "44f65ec249ed41f5a32e1540982a45aa" } }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "real/test-00000-of-00001.parquet: 0%| | 0.00/35.8k [00:00" 
], "text/html": [ "\n", "
\n", " \n", " \n", " [63/63 02:23, Epoch 1/1]\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
StepTraining Loss
101.519478
200.856389
300.745439
400.714947
500.736302
600.717740

" ] }, "metadata": {} } ], "source": [ "def collate(batch):\n", " all_conversations = []\n", " for ex in batch:\n", " # Ensure required keys exist before trying to access them\n", " if \"video link\" not in ex or \"text prompt\" not in ex or \"conversations\" not in ex or len(ex[\"conversations\"]) < 2:\n", " print(f\"Skipping example due to missing or incomplete data: {ex.get('id', 'N/A')}\")\n", " continue\n", "\n", " video_path = ex[\"video link\"]\n", " question = ex[\"text prompt\"]\n", " answer = ex[\"conversations\"][1][\"value\"]\n", "\n", " # Create a single conversation (list of message dictionaries) for this example\n", " conversation = [\n", " {\"role\":\"user\",\"content\":[\n", " {\"type\":\"video\",\"path\":video_path},\n", " {\"type\":\"text\",\"text\":question}]},\n", " {\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":answer}]}\n", " ]\n", " all_conversations.append(conversation)\n", "\n", " if not all_conversations:\n", " # Return an empty dictionary if no valid conversations were constructed\n", " return {\"input_ids\": torch.tensor([]), \"attention_mask\": torch.tensor([]), \"labels\": torch.tensor([])}\n", "\n", " toks = tok.apply_chat_template(\n", " all_conversations, # Pass the list of conversations (List[List[Dict]])\n", " tokenize=True,\n", " padding=True,\n", " return_dict=True,\n", " return_tensors=\"pt\"\n", " )\n", " return {\"input_ids\": toks[\"input_ids\"],\n", " \"attention_mask\": toks[\"attention_mask\"],\n", " \"labels\": toks[\"input_ids\"]}\n", "\n", "args = TrainingArguments(\n", " your_model_folder, per_device_train_batch_size=4,\n", " gradient_accumulation_steps=8, num_train_epochs=1, fp16=True,\n", " learning_rate=2e-4, lr_scheduler_type=\"cosine\", warmup_ratio=0.03,\n", " logging_steps=10, save_total_limit=2,\n", " remove_unused_columns=False # Add this to prevent column removal by Trainer\n", ")\n", "\n", "Trainer(model=model, train_dataset=train_ds,\n", " data_collator=collate, args=args).train()\n", "model.save_pretrained(your_model_folder)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 248, "referenced_widgets": [ "65d0a085c32e410bae5e889086673183", "5c5e7f4c4c274435b7c8200fed3c2af4", "7a94948e9d5a48ceb88b5ed26c1566bd", "226e267c66cc4232bf11b4c62d25fab4", "45b1a2a5225e4db2bb65e19da9129b64", "1b51f45b0f2746188d372442a8aafa11", "3ecd7dd20188419bbcd0855404fe228d", "b92ac54e7848482d932d731474ab26a2", "b699399a76b94dbea7e04d5e852d4f29", "cf8687b565a644b59bcab7ea1c3e16b3", "5c61a55db945430b80dd2ce7f0df5ac8", "bb8062a9db0c42c4b8c6f88061130c57", "87ceff8f62d9468da6cf505d0d6aae93", "ec7db76cac3b4e8a81784c440a14c247", "525399528bc54c0b928537fcb81bf4eb", "da3ca8489cb34c779476dfbcfcfd52c3", "88e8e360823b4901a04adafb0e352b5b", "231903c76fd241b89d104300433975cb", "ff5f0355efe1428c956308173060969c", "2a6a5d92c0a348c0aaca8bbf664912bb", "94379112ee5e43e898351005014e0202", "774cdebff3ef4b69bc62b97c5caa3e98", "21945612ba4c40ec943d33f77794bfd7", "30b0859dacf5452fb61beb704183d0e5", "7be39816f81c410eb3899ebf3d565578", "8810aaf657834f168d798e47d170bea3", "387acec5474a4d9393ad0d048d2ff6d9", "5a1bd3d9d6d64dee95d789bc30d82ae3", "dc023689144140f99298e9559860c5e6", "ffbd616e705f4b16b8df7f4589286f56", "0527cdbce0a24dae825cb7a95fe45d81", "030a44142d4a4815a15fa1a9cd0b4f3b", "ad4de7ddc58a4b02bb34e99ed6a484bf", "a854a0c760be4f85a2646f20c56a7d11", "b2188da2c959467b9333be65d3c4f263", "12f6065989064957b3b8a3f877b149e7", "350b69b057e346f4adaf3c90ece3dcda", 
"39421f248cfc4dd1938c7e51408dd96d", "14d03b195ef44c2cba7f058b61260e33", "7a545fb3a497431ca0662fe48a5243ea", "97580f89e83d43fe8a712a92bdeb6d11", "81697acb646149bb865855b5d4fd4250", "c353da6828f54a148da7440cf22c4fcf", "e2d9018b108048b78fb8da3b3e338f52", "6ea62c47df6d4e1cab0a39edadd7cb9f", "02ba170612d54afda9aba4b69b6c46b9", "99099eb02ff04ad98d79e5a8d6d222f3", "a1be81293007477bacc8c028df29e477", "c28169a69b00452d9b68a81823f6bce7", "8eceb5299c7748d694928f6ce7f4b99c", "fa2520ffe9c94bfe835a2d3df4b17bf7", "58628974056e4a099ac9efe67cbdba9e", "421512854f39478ba5fb9a05a524fa9e", "b0feb8aa77164ab2ab6e300b69acd58a", "5f2a971e42314282b3581bff7af699a3" ] }, "id": "CrLdgBpda4oE", "outputId": "efcc2d40-db3f-4174-c3da-5f38b0566ba8" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "`torch_dtype` is deprecated! Use `dtype` instead!\n" ] }, { "output_type": "display_data", "data": { "text/plain": [ "Loading weights: 0%| | 0/489 [00:00/smolvlm2-vide-qlora-adapter'\n", "\n", "base = AutoModelForImageTextToText.from_pretrained(\n", " model_id, dtype=torch.bfloat16, device_map=\"auto\"\n", ")\n", "model = PeftModel.from_pretrained(base, your_model_folder)\n", "model.push_to_hub(your_model_repo)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 67, "referenced_widgets": [ "32cb4fc987524bc69615856d8acdee96", "11e39b2c0d434dfb935710ac875c33a2", "8418f06fd5a04839bf17b546ccdbdcd4", "f71d21bc26534b44a6727f44681b9c53", "dde158b75b004efabcc569de95b27460", "a62a9c739fe74c76ac7983a8aa29c8b7", "d63299b6b0fd45d8ae9a6f8d6edaccc4", "b1522a6c30b04fb09c641198fbdaf264", "aa08ee015e18497185351224134c7118", "286a97ab27b14a6ba538bd1dc281a94d", "ed1925fe4fca4181812953459d9027c7" ] }, "id": "iseid1NabDUa", "outputId": "45a922fa-2bd8-4eb7-99b6-872b0e6e2454" }, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "MewNUHRGOm0.mp4: 0%| | 0.00/6.84M [00:00 str:\n", " messages = [{\n", " \"role\": \"user\",\n", " \"content\": [\n", " {\"type\": \"video\", \"path\": video_path},\n", " {\"type\": \"text\", \"text\": prompt},\n", " ],\n", " }]\n", "\n", " inputs = processor.apply_chat_template(\n", " messages,\n", " tokenize=True,\n", " add_generation_prompt=True,\n", " return_dict=True,\n", " return_tensors=\"pt\",\n", " do_sample_frames=True,\n", " num_frames=num_frames,\n", " )\n", " inputs = {k: (v.to(model.device) if torch.is_tensor(v) else v) for k, v in inputs.items()}\n", " if \"pixel_values_videos\" in inputs and torch.is_tensor(inputs[\"pixel_values_videos\"]):\n", " inputs[\"pixel_values_videos\"] = inputs[\"pixel_values_videos\"].to(dtype=dtype)\n", "\n", " out = model.generate(\n", " **inputs,\n", " max_new_tokens=128,\n", " do_sample=False,\n", " )\n", "\n", " prompt_len = inputs[\"input_ids\"].shape[-1]\n", " return processor.batch_decode(out[:, prompt_len:], skip_special_tokens=True)[0].strip()\n", "\n", "\n", "print(ask_video(sample_video, \"What is happening here?\", num_frames=8))" ] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "H100", "provenance": [], "machine_shape": "hm" }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 0 }