{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "NVaeP7dPCL8R" }, "source": [ "### On the 25 images\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "sYnYocs1wFbh", "outputId": "67fdeb23-7951-4948-c10b-4b690fa722c5" }, "outputs": [ { "ename": "FileURLRetrievalError", "evalue": "Failed to retrieve file url:\n\n\tToo many users have viewed or downloaded this file recently. Please\n\ttry accessing the file again later. If the file you are trying to\n\taccess is particularly large or is shared with many people, it may\n\ttake up to 24 hours to be able to view or download the file. If you\n\tstill can't access a file after 24 hours, contact your domain\n\tadministrator.\n\nYou may still be able to access the file from the browser:\n\n\thttps://drive.google.com/uc?id=13JoK-cyZRPpjDUDvE0bXyn1iWEVzhiPN\n\nbut Gdown can't. Please check connections and permissions.", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mFileURLRetrievalError\u001b[0m Traceback (most recent call last)", "File \u001b[0;32m~/anaconda3/lib/python3.13/site-packages/gdown/download.py:267\u001b[0m, in \u001b[0;36mdownload\u001b[0;34m(url, output, quiet, proxy, speed, use_cookies, verify, id, fuzzy, resume, format, user_agent, log_messages)\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 267\u001b[0m url \u001b[38;5;241m=\u001b[39m get_url_from_gdrive_confirmation(res\u001b[38;5;241m.\u001b[39mtext)\n\u001b[1;32m 268\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m FileURLRetrievalError \u001b[38;5;28;01mas\u001b[39;00m e:\n", "File \u001b[0;32m~/anaconda3/lib/python3.13/site-packages/gdown/download.py:53\u001b[0m, in \u001b[0;36mget_url_from_gdrive_confirmation\u001b[0;34m(contents)\u001b[0m\n\u001b[1;32m 52\u001b[0m error \u001b[38;5;241m=\u001b[39m 
m\u001b[38;5;241m.\u001b[39mgroups()[\u001b[38;5;241m0\u001b[39m]\n\u001b[0;32m---> 53\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m FileURLRetrievalError(error)\n\u001b[1;32m 54\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m url:\n", "\u001b[0;31mFileURLRetrievalError\u001b[0m: Too many users have viewed or downloaded this file recently. Please try accessing the file again later. If the file you are trying to access is particularly large or is shared with many people, it may take up to 24 hours to be able to view or download the file. If you still can't access a file after 24 hours, contact your domain administrator.", "\nDuring handling of the above exception, another exception occurred:\n", "\u001b[0;31mFileURLRetrievalError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[3], line 5\u001b[0m\n\u001b[1;32m 3\u001b[0m zip_file_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps://drive.google.com/file/d/13JoK-cyZRPpjDUDvE0bXyn1iWEVzhiPN/view\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 4\u001b[0m file_id \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m13JoK-cyZRPpjDUDvE0bXyn1iWEVzhiPN\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m----> 5\u001b[0m gdown\u001b[38;5;241m.\u001b[39mdownload(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps://drive.google.com/uc?id=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfile_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimages.zip\u001b[39m\u001b[38;5;124m\"\u001b[39m, quiet\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n", "File \u001b[0;32m~/anaconda3/lib/python3.13/site-packages/gdown/download.py:278\u001b[0m, in \u001b[0;36mdownload\u001b[0;34m(url, output, quiet, proxy, speed, use_cookies, verify, id, fuzzy, resume, format, user_agent, log_messages)\u001b[0m\n\u001b[1;32m 268\u001b[0m 
\u001b[38;5;28;01mexcept\u001b[39;00m FileURLRetrievalError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 269\u001b[0m message \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 270\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to retrieve file url:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 271\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou may still be able to access the file from the browser:\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 276\u001b[0m url_origin,\n\u001b[1;32m 277\u001b[0m )\n\u001b[0;32m--> 278\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m FileURLRetrievalError(message)\n\u001b[1;32m 280\u001b[0m filename_from_url \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 281\u001b[0m last_modified_time \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n", "\u001b[0;31mFileURLRetrievalError\u001b[0m: Failed to retrieve file url:\n\n\tToo many users have viewed or downloaded this file recently. Please\n\ttry accessing the file again later. If the file you are trying to\n\taccess is particularly large or is shared with many people, it may\n\ttake up to 24 hours to be able to view or download the file. If you\n\tstill can't access a file after 24 hours, contact your domain\n\tadministrator.\n\nYou may still be able to access the file from the browser:\n\n\thttps://drive.google.com/uc?id=13JoK-cyZRPpjDUDvE0bXyn1iWEVzhiPN\n\nbut Gdown can't. Please check connections and permissions." 
import os
import random
import pandas as pd

# Paths
folder_path = "/content/images/images/"
train_csv_path = "/content/train.csv"

# Load the full training table.
train_df = pd.read_csv(train_csv_path)

# The image file name is the last path component of the link.
train_df['image_file'] = train_df['image_link'].apply(lambda x: os.path.basename(x))

# Collect every image file name found anywhere under the folder.
valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.gif')
all_images = [
    name
    for _, _, file_names in os.walk(folder_path)
    for name in file_names
    if name.lower().endswith(valid_extensions)
]

print(f"Found {len(all_images)} images in folder")

# Restrict to rows whose image is actually present on disk.
existing_images_df = train_df[train_df['image_file'].isin(all_images)]

# Deterministic sample of up to 6000 such rows.
selected_df = existing_images_df.sample(n=min(6000, len(existing_images_df)), random_state=42)

# Assemble the reduced training frame with a fresh 0..n-1 index.
mini_train = (
    selected_df[['sample_id', 'catalog_content', 'price', 'image_file']]
    .rename(columns={'image_file': 'image'})
    .reset_index(drop=True)
)

print(mini_train.head())
print(f"mini_train size: {len(mini_train)}")
"a9070224-eeaf-4b91-cf63-a5f698c27256" }, "outputs": [ { "data": { "application/vnd.google.colaboratory.intrinsic+json": { "summary": "{\n \"name\": \"train\",\n \"rows\": 75000,\n \"fields\": [\n {\n \"column\": \"sample_id\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 86585,\n \"min\": 0,\n \"max\": 299438,\n \"num_unique_values\": 75000,\n \"samples\": [\n 158784,\n 4095,\n 172021\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"catalog_content\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 74900,\n \"samples\": [\n \"Item Name: Cooper Street Granola Bakes - Chewy Breakfast Granola Bars with Chia, Flax, Buckwheat and Oats - Blueberry Pomegranate Individually Wrapped Nut & Dairy Free On-The-Go or School Snacks - 12 Bars, 1oz each\\nBullet Point 1: Cookies, Made Better - Fuel your day and enjoy delectably chewy goodness with Cooper Street Granola Bars Bulk! Bite into wholesome and healthful ingredients like chia, flax, buckwheat and oats and get off on the right foot with our healthy snack bars!\\nBullet Point 2: Blueberry Pomegranate Flavor - Bursting with locally sourced Michigan blueberries and blended with a pomegranate's pop, our granola snack bars deliver the perfect mix of sweet and tart. Pomegranates and blueberries truly are a match made in heaven!\\nBullet Point 3: Guilt-Free Deliciousness - Start your day on a high note with the energy you need with this pomegranate blueberry bar. Our satiating anytime breakfast cookies individually wrapped satisfy your sweet tooth with high-quality ingredients, for a truly clean and honest energy boost throughout your day!\\nBullet Point 4: Baked With Care - Made with a mix of passion, all natural ingredients and 100 years of tradition, our healthy granola bar is still handmade in our family-run Michigan bakery. 
Our perfect pick me up is baked to a moist and chewy perfection.\\nBullet Point 5: Enjoyed By All - We go the extra mile to ensure everyone can enjoy our granola soft baked cookies. Made in a dedicated peanut free facility to be dairy free, HFCS free, tans fat free, soy free, artificial flavourings free and also low in sodium. We don't tolerate any nasties!\\nProduct Description: individual snacks breakfast foods individually wrapped breakfast food breakfast snacks individually wrapped granola snacks individual packs healthy bars for kids bars healthy granola bars for kids granola bars kids bars kids granola bars healthy organic kids snacks kids granola bars kids bar organic granola bars snack bars for kids kid bars healthy snacks for kids granola bars variety packs breakfast bars healthy snack bars healthy food bars nut free bars granola bars soft grabola bars granila bars grnola bars granola bara gronala bars gronalla bars individually wrapped breakfast items good snacks for kids granola bar packs kids breakfast bars healthy snacks for kids individually wrapped bars food copper street blueberry pomegranate cookie cooper street cookie blueberry pomegranate cooper street blueberry pomegrante pomegranate blueberry bar cooper street blueberry pomegranate granola cookie bakes cooper street cookies blueberry pomegranate bakes blurberry pomegranit granola cooper street pomegranate blueberry pomogranite granola bake blueberry pomogranate bars blueberry pomegranate granola cookie blueberry and pomegranate granola bar cooper st blueberry pomegranite granola bar cooper st blueberry pomegrante bars blueberry pomegrenate cooper street cooper street blueberry pomegranite cooper street blueberry pomegrante bar copper street granola bar - blueberry pomegranate blueberry pomegranate granola cookie bake blueberry pomegranate granola bakes 12-count (1 oz or 2 oz) pomegranate blueberry snack cooper street snacks blueberry pom cooper street cookies chewy granola bakes blueberry 
pomegranate 1... cooper farms blueberry pomegranate bars blueberry pomegranate granola bakes blueberry pomegranate granola bars blueberry pomegranate granola bar pomegranate pomegranate snacks 1 ounce - 12 per case cooper street granola cookie bakes blueberry pomegranate blueberry pomegranate bar blueberry pomegranate bars cooper street blueberry pomegranate granola cookie bake\\nValue: 12.0\\nUnit: Ounce\\n\",\n \"Item Name: Stonewall Kitchen Wildflower Honey, 16 Ounces\\nBullet Point 1: Stonewall Kitchen Wildflower Honey, 16 Ounces\\nBullet Point 2: Our Wildflower Honey is a delicious and unique blend of nectars gathered from a variety of flowering trees, shrubs and flowers\\nBullet Point 3: A delectably sweet, medium flavored honey with floral notes\\nBullet Point 4: Perfect for sweetening tea, enjoying on pancakes or for adding wonderful flavor to baked goods\\nBullet Point 5: Stonewall Kitchen Family of Brands: Our award winning line of gourmet food, home goods, and gifts are loved around the world. Featuring brands such as Legal Sea Foods, Michel Design Works, Montebello, Napa Valley Naturals, Stonewall Home, Stonewall Kitchen, Urban Accents, Vermont Coffee Company, Vermont Village, and Village Candle\\nBullet Point 6: About Us: It all started in 1991 at a local farmers' market with a few dozen items that we'd finished hand-labeling only hours before. Fast-forward to today and Stonewall Kitchen is now home to an ever-growing family of like-minded lifestyle brands! Expertly made with premium ingredients, our products are the result of decades spent dreaming up, testing and producing only the very best in specialty foods and fine home living.\\nValue: 16.0\\nUnit: Ounce\\n\",\n \"Item Name: Quaker Large Rice Cakes, Lightly Salted, Pack of 6\\nBullet Point 1: Made with whole grain brown rice and baked to crispy deliciousness\\nBullet Point 2: Enjoy plain or top with your own peanut butter, jelly, or jam. 
Great for any snacking occasion\\nBullet Point 3: 35 calories per cake\\nBullet Point 4: The perfect amount of crunch, with the taste of salt\\nValue: 26.82\\nUnit: Ounce\\n\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"image_link\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 72288,\n \"samples\": [\n \"https://m.media-amazon.com/images/I/81x1QmnBG-L.jpg\",\n \"https://m.media-amazon.com/images/I/81MWCBM09NL.jpg\",\n \"https://m.media-amazon.com/images/I/91X0Abm9cGL.jpg\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"price\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 33.37693218315522,\n \"min\": 0.13,\n \"max\": 2796.0,\n \"num_unique_values\": 11862,\n \"samples\": [\n 22.075,\n 15.69,\n 55.4\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"description\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 74772,\n \"samples\": [\n \"English Tea Store Rooibos Caffeine Free in Loose Leaf Pouches Bourbon, Vanilla, 4 Oz\\nBullet Point 1: Caffeine free\\nBullet Point 2: The cinnamon spice flavoring gives this Bourbon Street Vanilla Rooibos a wonderful vanilla\\nBullet Point 3: Contents loose leaf tea net weight 4 ounce\\nBullet Point 4: Item Package Weight: 0.3 pounds\",\n \"Goya Foods Penne Rigate Pasta, 16-Ounce (Pack of 20)\\nBullet Point 1: DELICIOUS PASTA | Goya Penne Rigate is a delicious product made from 100% durum wheat. Enjoy Goya Penne Rigate with your favorite sauce for a great Italian dish anytime.\\nBullet Point 2: CONVENIENT & VERSATILE | Enjoy Goya Penne Rigate with your favorite sauce for a great Italian dish anytime, perfect for all of your favorite pasta recipes. 
An enriched Goya pasta product, Goya Penne Rigate is sure to become your new pantry staple.\\nBullet Point 3: NUTRITIONAL QUALITIES | Low Fat, Saturated Fat Free, Trans Fat Free, Cholesterol Free, Sodium Free\\nBullet Point 4: PREMIUM QUALITY | If it's Goya... it has to be good! | \\u00a1Si es Goya... tiene que ser bueno!\\nBullet Point 5: PACK OF 20: 16 OZ BAGS | Single unit boxes and multipacks of Goya's extensive line of Pastas & Noodles available on Amazon, Amazon Fresh and Prime Pantry\",\n \"Monin - Brown Butter Toffee Syrup, Buttery-Smooth Flavor with Rich Nutty Aroma, Great for Lattes, Milkshakes, and Iced Coffees, Gluten-Free, Vegan, Non-GMO (1 Liter)\\nBullet Point 1: GREAT FOR COFFEES & COCKTAILS: Use our natural Brown Butter Toffee Syrup to add depth and craveabilty to specialty coffees, cocktails, milkshakes and more.\\nBullet Point 2: TASTING NOTES: Brown butter is the ultimate flavor enhancer, delivering rich nutty taste and aroma. Combine that with the sweet & salty notes of toasted toffee and you\\u2019ve got an unbeatable combination.\\nBullet Point 3: SPECIFICATIONS: No Artificial Flavors, No Artificial Sweeteners, Gluten-Free, Non-GMO, Kosher, Allergen-Free, & Dairy-Free.\\nBullet Point 4: SYRUP TO SUPPORT BEE COLONIES: Colony Collapse Disorder is greatly reducing bee populations worldwide. Our syrup recycling initiative has been sending leftover syrup to feed bees since 2008, and has fed over 374 billion bees!\\nBullet Point 5: GREEN PACKAGING & LABELING: We use BPA-free bottles & 100% recyclable boxes made of 30% recycled content. Our products are printed with pollutant-free, water-based ink that is biodegradable, & pulp inserts to help us do our part for the environment.\\nProduct Description: Brown butter is the ultimate flavor enhancer, delivering rich nutty taste and aroma. Combine that with the sweet & salty notes of toasted toffee and you\\u2019ve got an unbeatable combination. 
Use our natural Brown Butter Toffee Syrup to add depth and craveabilty to specialty coffees, cocktails, milkshakes and more.\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"quantity\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 461.81874317508215,\n \"min\": 0.0,\n \"max\": 63882.0,\n \"num_unique_values\": 3104,\n \"samples\": [\n 372.63,\n 110.4,\n 13.25\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"unit\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 94,\n \"samples\": [\n \" milliliter\",\n \" Fl Oz \",\n \" ml\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"image\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 72288,\n \"samples\": [\n \"81x1QmnBG-L.jpg\",\n \"81MWCBM09NL.jpg\",\n \"91X0Abm9cGL.jpg\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"price_log\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.9420322642519752,\n \"min\": 0.12221763272424921,\n \"max\": 7.936302693201959,\n \"num_unique_values\": 11510,\n \"samples\": [\n 2.9798569243957007,\n 2.4853232296732166,\n 4.645448032486661\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}", "type": "dataframe", "variable_name": "train" }, "text/html": [ "\n", "
\n", "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
sample_idcatalog_contentimage_linkpricedescriptionquantityunitimageprice_log
033127Item Name: La Victoria Green Taco Sauce Mild, ...https://m.media-amazon.com/images/I/51mo8htwTH...4.89La Victoria Green Taco Sauce Mild, 12 Ounce (P...72.00Fl Oz51mo8htwTHL.jpg1.773256
1198967Item Name: Salerno Cookies, The Original Butte...https://m.media-amazon.com/images/I/71YtriIHAA...13.12Salerno Cookies, The Original Butter Cookies, ...32.00Ounce71YtriIHAAL.jpg2.647592
2261251Item Name: Bear Creek Hearty Soup Bowl, Creamy...https://m.media-amazon.com/images/I/51+PFEe-w-...1.97Bear Creek Hearty Soup Bowl, Creamy Chicken wi...11.40Ounce51+PFEe-w-L.jpg1.088562
355858Item Name: Judee’s Blue Cheese Powder 11.25 oz...https://m.media-amazon.com/images/I/41mu0HAToD...30.34Judee’s Blue Cheese Powder 11.25 oz - Gluten-F...11.25Ounce41mu0HAToDL.jpg3.444895
4292686Item Name: kedem Sherry Cooking Wine, 12.7 Oun...https://m.media-amazon.com/images/I/41sA037+Qv...66.49kedem Sherry Cooking Wine, 12.7 Ounce - 12 per...12.00Count41sA037+QvL.jpg4.211979
\n", "
\n", "
\n", "\n", "
\n", " \n", "\n", " \n", "\n", " \n", "
\n", "\n", "\n", "
\n", " \n", "\n", "\n", "\n", " \n", "
\n", "\n", "
\n", "
\n" ], "text/plain": [ " sample_id catalog_content \\\n", "0 33127 Item Name: La Victoria Green Taco Sauce Mild, ... \n", "1 198967 Item Name: Salerno Cookies, The Original Butte... \n", "2 261251 Item Name: Bear Creek Hearty Soup Bowl, Creamy... \n", "3 55858 Item Name: Judee’s Blue Cheese Powder 11.25 oz... \n", "4 292686 Item Name: kedem Sherry Cooking Wine, 12.7 Oun... \n", "\n", " image_link price \\\n", "0 https://m.media-amazon.com/images/I/51mo8htwTH... 4.89 \n", "1 https://m.media-amazon.com/images/I/71YtriIHAA... 13.12 \n", "2 https://m.media-amazon.com/images/I/51+PFEe-w-... 1.97 \n", "3 https://m.media-amazon.com/images/I/41mu0HAToD... 30.34 \n", "4 https://m.media-amazon.com/images/I/41sA037+Qv... 66.49 \n", "\n", " description quantity unit \\\n", "0 La Victoria Green Taco Sauce Mild, 12 Ounce (P... 72.00 Fl Oz \n", "1 Salerno Cookies, The Original Butter Cookies, ... 32.00 Ounce \n", "2 Bear Creek Hearty Soup Bowl, Creamy Chicken wi... 11.40 Ounce \n", "3 Judee’s Blue Cheese Powder 11.25 oz - Gluten-F... 11.25 Ounce \n", "4 kedem Sherry Cooking Wine, 12.7 Ounce - 12 per... 
def preprocess(df: pd.DataFrame) -> None:
    """Parse structured fields out of ``catalog_content``, mutating ``df`` in place.

    Each ``catalog_content`` value is expected to end with
    ``"Value: <qty>\\nUnit: <unit>\\n"`` — so after splitting on newlines the
    last element is an empty string, element [-3] is the Value line and
    [-2] the Unit line — and to start with ``"Item Name: "`` (the first 11
    characters are dropped from the description).

    Columns added / overwritten:
      - description: text before the Value line, "Item Name: " prefix removed
      - quantity:    float value; -1.0 when missing or unparsable
      - unit:        unit string, stripped of surrounding whitespace
      - image:       basename of ``image_link`` (only when that column exists)
      - price / price_log: numeric price and log1p(price) (only when ``price`` exists)
    """
    # Progress bar is cosmetic; degrade gracefully when tqdm is absent.
    try:
        from tqdm.auto import tqdm as _progress
    except ImportError:
        _progress = lambda it, **kw: it

    for col, default in [("description", ""), ("quantity", 0.0), ("unit", ""), ("image", None)]:
        if col not in df.columns:
            df[col] = default

    df["description"] = df["description"].astype("string")
    df["unit"] = df["unit"].astype("string")

    descriptions = []
    quantities = []
    units = []

    # total= makes the bar show real progress instead of "0it [00:00, ?it/s]":
    # df.iterrows() has no __len__, so tqdm cannot infer it.
    for _, row in _progress(df.iterrows(), total=len(df)):
        lines = row["catalog_content"].split('\n')

        # Defaults for malformed rows.
        description = ""
        quantity = -1.0
        unit = ""

        if len(lines) >= 3:
            description = '\n'.join(lines[:-3])[11:]  # drop "Item Name: "
            quantity = lines[-3][7:]                  # drop "Value: "
            # Drop "Unit:" then strip: the raw slice kept a leading space,
            # which made units like " Ounce" and "Ounce" distinct categories.
            unit = lines[-2][5:].strip()

        # Missing / non-numeric quantity falls back to the -1.0 sentinel.
        try:
            quantity = float(quantity)
        except (ValueError, TypeError):
            quantity = -1.0

        descriptions.append(description)
        quantities.append(quantity)
        units.append(unit)

    df["description"] = descriptions
    df["quantity"] = quantities
    df["unit"] = units

    if "image_link" in df.columns:
        # Series.map keeps the original index; pd.Series(map(...)) would
        # build a fresh RangeIndex and misalign rows for any frame whose
        # index is not already 0..n-1.
        df["image"] = df["image_link"].map(lambda link: link.split('/')[-1])

    if "price" in df.columns:
        df["price"] = pd.to_numeric(df["price"], errors='coerce')
        df["price_log"] = np.log1p(df["price"].fillna(0))
class SMAPELoss(nn.Module):
    """
    Symmetric Mean Absolute Percentage Error (SMAPE), scaled to 0-200.
    SMAPE = 200 * mean( |pred - target| / ((|pred| + |target|)/2 + eps) )
    """

    def __init__(self, eps=1e-8):
        super().__init__()
        self.eps = eps  # guards against a zero denominator when both values are 0

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Return the scalar SMAPE between pred and target (any matching shape)."""
        p = pred.view(-1)
        t = target.view(-1)
        abs_error = (p - t).abs()
        mean_magnitude = (p.abs() + t.abs()) / 2 + self.eps
        return 200 * (abs_error / mean_magnitude).mean()
[07:04<00:00, 1.13s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1: Train Loss = 59.4106\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 2/30: 100%|██████████| 375/375 [07:03<00:00, 1.13s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 2: Train Loss = 49.0755\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 3/30: 100%|██████████| 375/375 [06:56<00:00, 1.11s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 3: Train Loss = 42.6034\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 4/30: 100%|██████████| 375/375 [07:01<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 4: Train Loss = 38.2163\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 5/30: 100%|██████████| 375/375 [07:02<00:00, 1.13s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 5: Train Loss = 34.2816\n", "Model saved at /content/clip_regressor_epoch5.pt\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 6/30: 100%|██████████| 375/375 [07:02<00:00, 1.13s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 6: Train Loss = 29.8103\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 7/30: 100%|██████████| 375/375 [07:03<00:00, 1.13s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 7: Train Loss = 26.5908\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 8/30: 100%|██████████| 375/375 [06:59<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 8: Train Loss = 24.4670\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 9/30: 100%|██████████| 375/375 [07:01<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 9: Train Loss = 22.7550\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 
10/30: 100%|██████████| 375/375 [06:56<00:00, 1.11s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 10: Train Loss = 21.1924\n", "Model saved at /content/clip_regressor_epoch10.pt\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 11/30: 100%|██████████| 375/375 [07:00<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 11: Train Loss = 20.0946\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 12/30: 100%|██████████| 375/375 [06:54<00:00, 1.10s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 12: Train Loss = 18.3806\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 13/30: 100%|██████████| 375/375 [07:00<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 13: Train Loss = 16.6626\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 14/30: 100%|██████████| 375/375 [06:58<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 14: Train Loss = 15.9724\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 15/30: 100%|██████████| 375/375 [06:57<00:00, 1.11s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 15: Train Loss = 15.4344\n", "Model saved at /content/clip_regressor_epoch15.pt\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 16/30: 100%|██████████| 375/375 [06:59<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 16: Train Loss = 14.2654\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 17/30: 100%|██████████| 375/375 [06:58<00:00, 1.12s/it]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 17: Train Loss = 13.6136\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Epoch 18/30: 29%|██▊ | 107/375 [01:59<05:11, 1.16s/it]" ] } ], "source": [ "from torch.utils.data import Dataset, 
from torch.utils.data import Dataset, DataLoader
from transformers import CLIPProcessor, CLIPModel
import cv2
import torch
import torch.nn as nn
from tqdm import tqdm
import os
from PIL import Image


class CLIPProductDataset(Dataset):
    """Pairs product images with catalog text and a log-price target for CLIP fine-tuning.

    Rows whose image file is missing or has a non-image extension are dropped
    up front so __getitem__ never hits a dead path mid-epoch.
    """

    def __init__(self, df, images_folder, processor):
        self.images_folder = images_folder
        self.processor = processor
        # Keep only rows whose image file exists and has a known image extension.
        self.data = self._validate_images(df)

    def _validate_images(self, df):
        """Return a copy of ``df`` restricted to rows with loadable image files."""
        valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.webp'}
        valid_rows = []
        for idx, row in df.iterrows():
            image_path = row['image']
            # Skip hidden files such as the .DS_Store entries macOS drops into folders.
            if image_path.startswith('.') or '/.DS_Store' in image_path:
                continue
            ext = os.path.splitext(image_path)[1].lower()
            if ext not in valid_extensions:
                continue
            full_path = os.path.join(self.images_folder, image_path)
            if os.path.exists(full_path):
                valid_rows.append(row)
            else:
                print(f"Warning: Image not found - {full_path}")
        print(f"Valid images: {len(valid_rows)} / {len(df)}")
        return df.loc[[row.name for row in valid_rows]].reset_index(drop=True)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data.iloc[idx]
        full_path = os.path.join(self.images_folder, item['image'])
        image = cv2.imread(full_path)
        if image is None:
            raise FileNotFoundError(f"Could not load image: {full_path}")
        # OpenCV loads BGR; CLIP's processor expects RGB PIL images.
        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        return {
            "image": image,
            "text": item['catalog_content'],
            "label": float(item['price_log']),
        }


def collate_fn(batch):
    """Tokenize texts and preprocess images for one batch; returns CLIP-ready tensors."""
    images = [b["image"] for b in batch]  # PIL Images
    texts = [b["text"] for b in batch]
    labels = torch.tensor([b["label"] for b in batch], dtype=torch.float)

    encoding = processor(
        text=texts,
        images=images,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )
    return {
        "pixel_values": encoding["pixel_values"],
        "input_ids": encoding["input_ids"],
        "attention_mask": encoding["attention_mask"],
        "labels": labels,
    }


class CLIPRegressionModel(nn.Module):
    """CLIP backbone plus a linear head regressing log-price from [image ; text] embeddings."""

    def __init__(self, clip_model_name="openai/clip-vit-base-patch32"):
        super().__init__()
        self.clip = CLIPModel.from_pretrained(clip_model_name)
        hidden_size = self.clip.config.projection_dim  # 512 for ViT-B/32
        self.regressor = nn.Linear(hidden_size * 2, 1)

    def forward(self, input_ids, attention_mask, pixel_values):
        image_embeds = self.clip.get_image_features(pixel_values=pixel_values)
        text_embeds = self.clip.get_text_features(input_ids=input_ids, attention_mask=attention_mask)
        combined = torch.cat([image_embeds, text_embeds], dim=1)
        return self.regressor(combined).squeeze(-1)


class SMAPELoss(nn.Module):
    """Symmetric mean absolute percentage error.

    BUG FIX: this class was commented out, but the training cell below still
    executes ``criterion = SMAPELoss()`` -- restoring the definition prevents
    the NameError on a fresh Restart & Run All.
    """

    def forward(self, pred, target):
        return torch.mean(torch.abs(pred - target) / (torch.abs(pred) + torch.abs(target) + 1e-8))


processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

train_dataset = CLIPProductDataset(
    mini_train,
    "/content/images/images/train",
    processor,
)

train_loader = DataLoader(
    train_dataset,
    batch_size=16,
    shuffle=True,
    collate_fn=collate_fn,
)

device = "cuda" if torch.cuda.is_available() else "cpu"
model = CLIPRegressionModel().to(device)
# BUG FIX: SMAPELoss was instantiated here while its definition was commented
# out earlier in the notebook, raising NameError on a fresh run.  Define it
# locally so this cell is self-contained.
class SMAPELoss(nn.Module):
    """Symmetric mean absolute percentage error on (pred, target) tensors."""

    def forward(self, pred, target):
        return torch.mean(torch.abs(pred - target) / (torch.abs(pred) + torch.abs(target) + 1e-8))


criterion = SMAPELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

NUM_EPOCHS = 30
train_losses = []

for epoch in range(NUM_EPOCHS):
    model.train()
    total_loss = 0

    for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/{NUM_EPOCHS}"):
        batch = {k: v.to(device) for k, v in batch.items()}

        preds = model(
            batch['input_ids'],
            batch['attention_mask'],
            batch['pixel_values'],
        )
        loss = criterion(preds, batch['labels'])

        optimizer.zero_grad()
        loss.backward()
        # Gradient clipping keeps the fine-tuned CLIP weights from blowing up.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()

        total_loss += loss.item()

    avg_loss = total_loss / len(train_loader)
    train_losses.append(avg_loss)
    print(f"Epoch {epoch+1}: Train Loss = {avg_loss:.4f}")

    # Checkpoint every 5 epochs so a Colab disconnect does not lose progress.
    if (epoch + 1) % 5 == 0:
        save_path = f"/content/clip_regressor_epoch{epoch+1}.pt"
        torch.save({
            "epoch": epoch + 1,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "loss": avg_loss,
        }, save_path)
        print(f"Model saved at {save_path}")

# Save model weights
torch.save(model.state_dict(), "/content/clip_model.pt")

# Later, you can load it like this:
# model.load_state_dict(torch.load("/content/clip_model.pt"))
# model.to(device)
# model.eval()

# BUG FIX: CLIPRegressionModel is a plain nn.Module and has no
# save_pretrained(); the original call raised AttributeError.  Save the
# Hugging Face sub-model (and the processor) in HF format instead.
model.clip.save_pretrained("/content/clip_model")
processor.save_pretrained("/content/clip_processor")
# Later, reload the saved artifacts with:
# from transformers import CLIPModel, CLIPProcessor
# model = CLIPModel.from_pretrained("/content/clip_model").to(device)
# processor = CLIPProcessor.from_pretrained("/content/clip_processor")

# Training-loss curve across epochs.
plt.plot(train_losses)
plt.show()

# (Optional) restore a mid-training checkpoint instead of retraining:
# device = "cuda" if torch.cuda.is_available() else "cpu"
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# model = CLIPRegressionModel().to(device)
# checkpoint = torch.load("/content/clip_regressor_epoch5.pt", map_location=device)
# model.load_state_dict(checkpoint["model_state_dict"])
# model.eval()  # set to evaluation mode

import pandas as pd
import os
import cv2
from PIL import Image
import torch
import numpy as np
from tqdm import tqdm
# ----------------------------
# 1. Build mini_test from the first 28 images present on disk
# ----------------------------
img28_names = []
for entry in os.scandir("/content/images/images/train"):
    if entry.is_file():
        img28_names.append(entry.name)
        if len(img28_names) == 28:
            break

# IMPROVEMENT: the original grew mini_test cell-by-cell with .loc (quadratic)
# and called list.index() for every matching row; build a records dict keyed
# by the image's position instead, with an O(1) name -> position map.
name_to_pos = {name: pos for pos, name in enumerate(img28_names)}

records = {}
for _, row in train.iterrows():  # 'train' has sample_id, catalog_content, price, image_link
    img_name = row["image_link"].split('/')[-1]
    pos = name_to_pos.get(img_name)
    if pos is not None:
        records[pos] = {
            "sample_id": row["sample_id"],
            "catalog_content": row["catalog_content"],
            "price": row["price"],
            "image": img_name,
        }

mini_test = pd.DataFrame.from_dict(records, orient="index",
                                   columns=['sample_id', 'catalog_content', 'price', "image"])

# Drop rows with missing image/catalog fields if any
mini_test = mini_test.dropna(subset=["image", "catalog_content"]).reset_index(drop=True)

# ----------------------------
# 2. Prepare for inference
# ----------------------------
texts = mini_test["catalog_content"].tolist()
images = []

for img_name in mini_test["image"]:
    full_path = os.path.join("/content/images/images/train/", img_name)
    img_bgr = cv2.imread(full_path)
    if img_bgr is None:
        raise FileNotFoundError(f"Could not load: {full_path}")
    # BGR -> RGB, then PIL: the CLIP processor expects unscaled PIL images.
    images.append(Image.fromarray(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)))

# ----------------------------
# 3. Encode with processor
# ----------------------------
inputs = processor(
    text=texts,
    images=images,
    return_tensors="pt",
    padding=True,
    truncation=True,
).to(device)
# ----------------------------
# 4. Run inference
# ----------------------------
model.eval()
with torch.no_grad():
    preds = model(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        pixel_values=inputs["pixel_values"],
    )

# Predictions live in log1p(price) space; expm1 inverts the transform.
pred_prices_log = preds.cpu().numpy()
pred_prices_original = np.expm1(pred_prices_log)

# ----------------------------
# 5. Compare with true prices
# ----------------------------
true_prices = mini_test["price"].values

for i, (pred, true) in enumerate(zip(pred_prices_original, true_prices)):
    print(f"Product {i+1}: Predicted Price = {pred:.2f}, True Price = {true:.2f}")

# Alias kept for downstream cells.
test_df = test
# ---------------------------------------------------
# 1. Use your test dataframe
# ---------------------------------------------------
test_df = test.dropna(subset=["catalog_content", "image"]).reset_index(drop=True)

IMAGE_FOLDER = "/content/images/images/test/"

texts = []
images = []
valid_sample_ids = []

for _, row in tqdm(test_df.iterrows(), total=len(test_df), desc="Loading images"):
    full_path = os.path.join(IMAGE_FOLDER, row["image"])
    img_bgr = cv2.imread(full_path)

    if img_bgr is None:
        # Missing/corrupt file: skip the sample, keep going.
        print(f"[WARN] Skipping missing image: {full_path}")
        continue

    images.append(Image.fromarray(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)))
    texts.append(row["catalog_content"])
    valid_sample_ids.append(row["sample_id"])

# ---------------------------------------------------
# Bail out early if the folder path or filenames are wrong.
# ---------------------------------------------------
if not images:
    raise RuntimeError("No valid images found. Check folder path or filenames.")

# ---------------------------------------------------
# 2. Encode using processor
# ---------------------------------------------------
inputs = processor(
    text=texts,
    images=images,
    return_tensors="pt",
    padding=True,
    truncation=True,
).to(device)
# ---------------------------------------------------
# 3. Inference
# ---------------------------------------------------
model.eval()
with torch.no_grad():
    preds = model(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        pixel_values=inputs["pixel_values"],
    )

# Assuming outputs are log1p(price); invert back to the original scale.
pred_prices_log = preds.cpu().numpy()
pred_prices_original = np.expm1(pred_prices_log)

# ---------------------------------------------------
# 4. Build full submission with all sample_ids
# ---------------------------------------------------
submission = pd.DataFrame({
    "sample_id": test["sample_id"],  # all test entries
})

# Map predictions only for sample_ids whose image loaded successfully;
# everything else becomes NaN.
pred_series = pd.Series(
    pred_prices_original.flatten(),
    index=valid_sample_ids,
)
submission["price"] = submission["sample_id"].map(pred_series)

# BUG FIX: `submission["price"].fillna(0, inplace=True)` is inplace on a
# column accessor -- chained assignment that modern pandas deprecates and
# that can silently fail to update the frame.  Assign the result instead.
submission["price"] = submission["price"].fillna(0)  # default for missing images

submission.to_csv("submission.csv", index=False)
print("✅ submission.csv created successfully with all sample_ids!")
print(submission.head())
print("Total rows in submission:", len(submission))
print("Missing price entries:", submission['price'].isna().sum())
\u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0mimages\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransforms\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mToTensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcvtColor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/content/images/\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"image\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCOLOR_BGR2RGB\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mp\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mproducts\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 28\u001b[0;31m inputs = processor(\n\u001b[0m\u001b[1;32m 29\u001b[0m \u001b[0mtext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtexts\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0mimages\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mimages\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/transformers/processing_utils.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, images, text, videos, audio, **kwargs)\u001b[0m\n\u001b[1;32m 592\u001b[0m \u001b[0minput_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_kwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mattribute_to_kwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mattribute_name\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 593\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0minput_data\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mattribute\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 594\u001b[0;31m \u001b[0mattribute_output\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mattribute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0minput_kwargs\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 595\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mattribute_output\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 596\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/transformers/image_processing_utils.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, images, **kwargs)\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimages\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mBatchFeature\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0;34m\"\"\"Preprocess an image or a batch of images.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpreprocess\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 52\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mpreprocess\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimages\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mBatchFeature\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/transformers/models/clip/image_processing_clip.py\u001b[0m in \u001b[0;36mpreprocess\u001b[0;34m(self, images, do_resize, size, resample, do_center_crop, crop_size, do_rescale, rescale_factor, do_normalize, image_mean, image_std, do_convert_rgb, return_tensors, data_format, input_data_format, **kwargs)\u001b[0m\n\u001b[1;32m 313\u001b[0m \u001b[0mimages\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mto_numpy_array\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mimage\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mimages\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 314\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 315\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mdo_rescale\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mis_scaled_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 316\u001b[0m logger.warning_once(\n\u001b[1;32m 317\u001b[0m \u001b[0;34m\"It looks like you are trying to rescale already rescaled images. 
If the input\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mIndexError\u001b[0m: list index out of range" ] } ], "source": [ "img28_names = []\n", "\n", "mini_test = pd.DataFrame(columns=['sample_id', 'catalog_content', 'price', \"image\"])\n", "for entry in os.scandir(\"/content/images/images/train\"):\n", " if entry.is_file():\n", " img28_names.append(entry.name)\n", "\n", "for row in train.iterrows():\n", " img_name = row[1][\"image_link\"].split('/')[-1]\n", " if img_name in img28_names:\n", " idx = img28_names.index(img_name)\n", " mini_test.loc[idx, \"sample_id\"] = row[1][\"sample_id\"]\n", " mini_test.loc[idx, \"catalog_content\"] = row[1][\"catalog_content\"]\n", " mini_test.loc[idx, \"price\"] = row[1][\"price\"]\n", " mini_test.loc[idx, \"image\"] = img_name\n", "\n", "products = []\n", "for _, row in mini_test.iterrows():\n", " products.append({\n", " \"catalog_content\": row[\"catalog_content\"],\n", " \"image\": row[\"image\"],\n", " \"quantity\": row.get(\"quantity\", 1.0)\n", " })\n", "\n", "texts = [p[\"catalog_content\"] for p in products]\n", "images = [(transforms.ToTensor()(cv2.cvtColor(cv2.imread(\"/content/images/\" + p[\"image\"]), cv2.COLOR_BGR2RGB))) for p in products]\n", "\n", "inputs = processor(\n", " text=texts,\n", " images=images,\n", " return_tensors=\"pt\",\n", " padding=True,\n", " truncation=True\n", ")\n", "inputs = {k:v.to(device) for k,v in inputs.items()}\n", "\n", "with torch.no_grad():\n", " preds = model(\n", " input_ids=inputs[\"input_ids\"],\n", " attention_mask=inputs[\"attention_mask\"],\n", " pixel_values=inputs[\"pixel_values\"]\n", " )\n", "\n", "pred_prices = preds.cpu().numpy()\n", "pred_prices_original = np.expm1(pred_prices)\n", "true_prices = sample_test[\"price\"].values\n", "for i, (pred, true) in enumerate(zip(pred_prices_original, true_prices)):\n", " print(f\"Product {i+1}: Predicted Price = {pred:.2f}, True Price = {true:.2f}\")" ] }, { "cell_type": "code", 
# Distribution of raw prices: heavy right tail, which motivates the
# log1p target used for training.
fig, ax = plt.subplots()
ax.hist(train["price"], bins=100)
plt.show()

# Same distribution after the log1p transform: far closer to symmetric.
fig, ax = plt.subplots()
ax.hist(train["price_log"], bins=100)
plt.show()
"top": null, "visibility": null, "width": null } }, "0d2dfe8079b64c1d9c4e206f8397ecb4": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_20d336b8cdc34f209efa04ecee3cca3f", "IPY_MODEL_810965354f974dc688b91bccaeac93f1", "IPY_MODEL_9f7c696071db422993959183da2d592a" ], "layout": "IPY_MODEL_c168e22fdc02476ca8982a26110d37af" } }, "18a1fdc90274480ab928086bbdd9f6ff": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "20d336b8cdc34f209efa04ecee3cca3f": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { 
"_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_0aad61826a49407583774ed307c5ec0c", "placeholder": "​", "style": "IPY_MODEL_fc9ad6e35b384bf281992e4ac713176c", "value": "" } }, "21ce57edad8b4dd28956e4be3a4199ab": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2d3219ad44294e25912571498d1d47e6": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", 
"description": "", "description_tooltip": null, "layout": "IPY_MODEL_ef759729a8bb489db2f0a0c4e2d4e85d", "placeholder": "​", "style": "IPY_MODEL_ce165d5e3afe443da80201cbea1ad47a", "value": " 75000/? [00:03<00:00, 24264.23it/s]" } }, "31e7e10d1d3a4b9d8122f9393d1d597b": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "356b4534498c4059be95f68735681a6e": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "414f3e0332214c3a8b36dad5084bb782": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "571c69b3c7f74b39b20aa300a832c5c8": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": 
null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "719a6175dc3d44e38fce990d84697fd1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_571c69b3c7f74b39b20aa300a832c5c8", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_356b4534498c4059be95f68735681a6e", "value": 1 } }, "7c14665fa20343a8a0f549171c9dd93b": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_de10c5f479d348e5b9ef82aa2c806914", "IPY_MODEL_719a6175dc3d44e38fce990d84697fd1", "IPY_MODEL_2d3219ad44294e25912571498d1d47e6" ], "layout": 
"IPY_MODEL_b619c79766d04c0f8859703518ad2a1b" } }, "7c46c15d8e4b44eeba465d28e36db09c": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "7dfbac1762ac422b84ef964343335f56": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, 
"justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "810965354f974dc688b91bccaeac93f1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_18a1fdc90274480ab928086bbdd9f6ff", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9fee55461ee144d9bb27713ad841cbd4", "value": 1 } }, "82b5bd3237264b38a6087d24823cb8ad": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_7c46c15d8e4b44eeba465d28e36db09c", "placeholder": "​", "style": "IPY_MODEL_d732e33609814019bdef6e97efa81a4a", "value": "" } }, "8a42ba9ccaaa4bd8904990a9594c5063": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", 
"_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_82b5bd3237264b38a6087d24823cb8ad", "IPY_MODEL_b681e3653dbc47a0b0e7a43c9f486f63", "IPY_MODEL_f82aef5bd95a4b119d2cd39f44a971d8" ], "layout": "IPY_MODEL_94aa04ab4a324a79a38b5097e74878f9" } }, "8bcf45c916c74180af29b8555c763e07": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "94aa04ab4a324a79a38b5097e74878f9": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9b8ef3f0ff52489e86c0e80ac6440cfa": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { 
"_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "9f7c696071db422993959183da2d592a": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_21ce57edad8b4dd28956e4be3a4199ab", "placeholder": "​", "style": "IPY_MODEL_8bcf45c916c74180af29b8555c763e07", "value": " 6000/? [00:00<00:00, 23264.82it/s]" } }, "9fee55461ee144d9bb27713ad841cbd4": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "b619c79766d04c0f8859703518ad2a1b": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, 
"grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b681e3653dbc47a0b0e7a43c9f486f63": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_d6c62060e41a46b799bdafc42919bfb5", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_9b8ef3f0ff52489e86c0e80ac6440cfa", "value": 1 } }, "c168e22fdc02476ca8982a26110d37af": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": 
null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ce165d5e3afe443da80201cbea1ad47a": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "d6c62060e41a46b799bdafc42919bfb5": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": "20px" } }, "d732e33609814019bdef6e97efa81a4a": { "model_module": 
"@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "de10c5f479d348e5b9ef82aa2c806914": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_7dfbac1762ac422b84ef964343335f56", "placeholder": "​", "style": "IPY_MODEL_414f3e0332214c3a8b36dad5084bb782", "value": "" } }, "ef759729a8bb489db2f0a0c4e2d4e85d": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": 
null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f82aef5bd95a4b119d2cd39f44a971d8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_f95d60aad92841e89be21aa324b1dadd", "placeholder": "​", "style": "IPY_MODEL_31e7e10d1d3a4b9d8122f9393d1d597b", "value": " 75000/? [00:03<00:00, 24047.16it/s]" } }, "f95d60aad92841e89be21aa324b1dadd": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "fc9ad6e35b384bf281992e4ac713176c": { "model_module": "@jupyter-widgets/controls", 
"model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } } } } }, "nbformat": 4, "nbformat_minor": 0 }