{"cells":[{"cell_type":"markdown","source":["This Notebook keyframe extracts all videos found under /content/drive/MyDrive/Saved from Chrome/. The Mp4 videos are sent to Trash on your Drive folder after processing."],"metadata":{"id":"SCY_pg9PBwdW"}},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"J3TdxpA2z9Kd"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"wTbO9mWbDXNr"},"outputs":[],"source":["\n","# @markdown ๐ Rapid keyframe processing\n","# @markdown
------
Extract Keyframes from ALL mp4 / webm videos found on Google Drive\n","# @markdown
Be mindful of Google Drive Terms of Service\n","# @markdown
This cell will process all mp4 videos found under\n","# @markdown
/content/drive/MyDrive/Saved from Chrome/\n","delete_mp4_when_done = True # @param {type:'boolean'}\n","# @markdown
deleted mp4/webm files will be found under 'trash' in your Google drive\n","# @markdown
-------\n","# @markdown
(Optional) Add a direct video link to below field.\n","# @markdown
Multiple links can be written in this field\n","# @markdown
separated by comma. Like this:
' https://my_video.mp4 , https://second_video.webm , .... '\n","import os\n","import shutil\n","# Install dependencies with %pip so they target the active kernel environment\n","%pip install video-kf\n","%pip install ffmpeg-python\n","%pip install wget\n","%pip install moviepy\n","import wget\n","import videokf as vf\n","import time\n","proc_keyframes=True # @param {type:'boolean'}\n","proc_audio=False # @param {type:'boolean'}\n","direct_link = '' # @param {type:'string'}\n","# @markdown The linked videos will be downloaded to the Google drive prior to running the script.\n","# @markdown
This feature is useful for direct processing .webm from 4chan threads into keyframes\n","use_link = False # @param {type:'boolean'}\n","if direct_link.find('http')>-1: use_link = True\n","if use_link:\n","  # Download each linked video into the Drive folder before processing\n","  %cd '/content/drive/MyDrive/Saved from Chrome/'\n","  for link in direct_link.split(','):\n","    if not link.find('http')>-1:continue\n","    wget.download(link.strip())\n","    time.sleep(5)\n","  %cd '/content/'\n","#-----#\n","# Collect every video found under srcpath (unzipping any archives first)\n","filenames = []\n","srcpath = '/content/drive/MyDrive/Saved from Chrome/'\n","destpath = '/content/drive/MyDrive/'\n","localpath = '/content/'\n","for filename in os.listdir(srcpath):\n","  if filename.endswith('.zip'):\n","    %cd {srcpath}\n","    !unzip '{filename}'\n","    os.remove(filename)\n","    filename = filename.replace('.zip','')\n","  for suffix in ['.mkv','.mp4','.webm']:\n","    if filename.endswith(suffix): filenames.append(filename)\n","def my_mkdirs(folder):\n","  # Recreate the folder from scratch so no stale keyframes remain\n","  if os.path.exists(folder):shutil.rmtree(folder)\n","  os.makedirs(folder)\n","#----#\n","# @markdown Write a funny name for the folder(s) containing the keyframes\n","name_keyframes_as='' # @param {type:'string'}\n","# @markdown Created .zip files will not be overwritten\n","if name_keyframes_as.strip()=='': name_keyframes_as='keyframes'\n","num = 0\n","savepath = ''\n","%cd {localpath}\n","for filename in filenames:\n","  tgt_folder = '/content/tmp'\n","  my_mkdirs(tgt_folder)\n","  print(f'Now processing video {filename}...')\n","  if proc_keyframes:\n","    vf.extract_keyframes(f'{srcpath}{filename}',output_dir_keyframes=tgt_folder)\n","    savepath = f'{destpath}{name_keyframes_as}_v{num}_kf'\n","    # Bump the version number until the target .zip name is unused\n","    while os.path.exists(f'{savepath}.zip'):\n","      num = num+1\n","      savepath = f'{destpath}{name_keyframes_as}_v{num}_kf'\n","    shutil.make_archive(savepath,'zip' ,
f'{tgt_folder}')\n","  if proc_audio:\n","    from moviepy.editor import VideoFileClip\n","    # Load the video file\n","    video = VideoFileClip(f\"{srcpath}{filename}\")\n","\n","    # Extract the audio track and save it as MP3\n","    audio = video.audio\n","    savepath = f\"{destpath}_audio_v{num}.mp3\"\n","\n","    # Bump the version number until the target .mp3 name is unused\n","    while os.path.exists(savepath):\n","      num = num+1\n","      savepath= f\"{destpath}_audio_v{num}.mp3\"\n","    if audio:\n","      audio.write_audiofile(savepath)\n","      audio.close()\n","    # Close the clip even when it has no audio track, to free resources\n","    video.close()\n","  #----#\n","  if delete_mp4_when_done: os.remove(f'{srcpath}{filename}')\n","  num = num+1\n"]},{"cell_type":"markdown","source":["This Cell below will put all the keyframes of the videos into a single zip folder ⬇️"],"metadata":{"id":"83W4CCPZCZVE"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"uZXUfKefmCIv"},"outputs":[],"source":["import os\n","import zipfile\n","import glob\n","from google.colab import drive\n","import shutil\n","from pathlib import Path\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Define the directory containing the zip files\n","zip_dir = '/content/drive/MyDrive/'\n","zip_pattern = 'keyframes_v*_kf.zip'\n","output_zip = '/content/drive/MyDrive/all_keyframes_combined.zip'\n","\n","# Temporary extraction directory\n","temp_dir = '/content/temp_extracted'\n","os.makedirs(temp_dir, exist_ok=True)\n","\n","# Function to check if file is an image\n","def is_image_file(filename):\n","    image_extensions = {\n","        '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.tif',\n","        '.webp', '.ico', '.svg', '.heic', '.heif', '.raw', '.cr2',\n","        '.nef', '.arw', '.dng', '.jpe', '.jp2', '.j2k'\n","    }\n","    ext = Path(filename).suffix.lower()\n","    return ext in image_extensions\n","\n","# Store original zip files for cleanup\n","original_zip_files = []\n","\n","print(\"Finding zip files...\")\n","zip_files = 
glob.glob(os.path.join(zip_dir, zip_pattern))\n","original_zip_files = zip_files.copy() # Keep for cleanup\n","print(f\"Found {len(zip_files)} zip files: {[os.path.basename(z) for z in zip_files]}\")\n","\n","# Extract each zip file\n","image_txt_pairs = []\n","for zip_path in zip_files:\n"," print(f\"\\n๐ Extracting: {os.path.basename(zip_path)}\")\n","\n"," with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," # Extract to temporary directory with unique subfolder\n"," unique_subdir = os.path.join(temp_dir, Path(zip_path).stem)\n"," zip_ref.extractall(unique_subdir)\n","\n"," # Find all files in the extracted content\n"," for root, dirs, files in os.walk(unique_subdir):\n"," for file in files:\n"," file_path = os.path.join(root, file)\n"," base_name = Path(file).stem\n"," ext = Path(file).suffix.lower()\n","\n"," if is_image_file(file):\n"," # Look for corresponding txt file\n"," txt_path = None\n"," possible_txt_names = [\n"," f\"{base_name}.txt\",\n"," f\"{base_name.lower()}.txt\",\n"," f\"{base_name.upper()}.txt\"\n"," ]\n","\n"," for txt_name in possible_txt_names:\n"," potential_txt = os.path.join(root, txt_name)\n"," if os.path.exists(potential_txt):\n"," txt_path = potential_txt\n"," break\n","\n"," image_txt_pairs.append({\n"," 'image_path': file_path,\n"," 'txt_path': txt_path,\n"," 'base_name': base_name\n"," })\n","\n","print(f\"\\n๐ Total image-txt pairs found: {len(image_txt_pairs)}\")\n","print(f\" Images with matching txt: {sum(1 for p in image_txt_pairs if p['txt_path'])}\")\n","print(f\" Images without txt: {sum(1 for p in image_txt_pairs if not p['txt_path'])}\")\n","\n","# Create the output zip file\n","print(\"\\n๐๏ธ Creating combined zip file...\")\n","combined_zip_created = False\n","\n","try:\n"," with zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED) as output_zip_file:\n"," for i, pair in enumerate(image_txt_pairs, 1):\n"," try:\n"," # Create new image filename (keep original extension)\n"," original_ext = 
Path(pair['image_path']).suffix.lower()\n"," new_image_name = f\"{i}{original_ext}\"\n","\n"," # Add image\n"," with open(pair['image_path'], 'rb') as img_file:\n"," img_data = img_file.read()\n"," output_zip_file.writestr(new_image_name, img_data)\n","\n"," # Add corresponding txt file if exists\n"," if pair['txt_path'] and os.path.exists(pair['txt_path']):\n"," new_txt_name = f\"{i}.txt\"\n"," with open(pair['txt_path'], 'rb') as txt_file:\n"," txt_data = txt_file.read()\n"," output_zip_file.writestr(new_txt_name, txt_data)\n","\n"," if i % 50 == 0:\n"," print(f\"Processed {i}/{len(image_txt_pairs)} items...\")\n","\n"," except Exception as e:\n"," print(f\"โ Error processing item {i}: {e}\")\n"," continue\n","\n"," # Verify the output zip was created successfully\n"," if os.path.exists(output_zip) and os.path.getsize(output_zip) > 0:\n"," combined_zip_created = True\n"," print(\"โ
Combined zip file created successfully!\")\n"," else:\n"," print(\"โ Failed to create combined zip file!\")\n"," combined_zip_created = False\n","\n","except Exception as e:\n"," print(f\"โ Error creating combined zip: {e}\")\n"," combined_zip_created = False\n","\n","# Clean up temporary directory\n","try:\n"," shutil.rmtree(temp_dir, ignore_errors=True)\n"," print(\"๐งน Temporary extraction files cleaned up\")\n","except:\n"," print(\"โ ๏ธ Could not clean up temporary extraction files\")\n","\n","# Remove original zip files ONLY if combined zip was created successfully\n","if combined_zip_created and original_zip_files:\n"," print(f\"\\n๐๏ธ Removing {len(original_zip_files)} original zip files...\")\n"," removed_count = 0\n","\n"," for zip_path in original_zip_files:\n"," try:\n"," if os.path.exists(zip_path):\n"," os.remove(zip_path)\n"," print(f\" ๐๏ธ Removed: {os.path.basename(zip_path)}\")\n"," removed_count += 1\n"," else:\n"," print(f\" โ ๏ธ File not found: {os.path.basename(zip_path)}\")\n"," except Exception as e:\n"," print(f\" โ Failed to remove {os.path.basename(zip_path)}: {e}\")\n","\n"," print(f\"โ
Successfully removed {removed_count}/{len(original_zip_files)} original zip files\")\n","else:\n"," print(\"\\nโ ๏ธ Skipping removal of original files - combined zip creation failed!\")\n"," print(\" Original files preserved for safety.\")\n","\n","# Final verification\n","if os.path.exists(output_zip):\n"," with zipfile.ZipFile(output_zip, 'r') as z:\n"," file_list = z.namelist()\n"," print(f\"\\n๐ Final verification:\")\n"," print(f\" ๐ Combined zip: {output_zip}\")\n"," print(f\" ๐ Total files: {len(file_list)}\")\n","\n"," images_count = len([f for f in file_list if is_image_file(f)])\n"," txts_count = len([f for f in file_list if f.endswith('.txt')])\n"," print(f\" ๐ผ๏ธ Images: {images_count}\")\n"," print(f\" ๐ Text files: {txts_count}\")\n","\n"," # Check for matching pairs\n"," image_numbers = set()\n"," txt_numbers = set()\n"," for f in file_list:\n"," if is_image_file(f):\n"," try:\n"," num = int(Path(f).stem)\n"," image_numbers.add(num)\n"," except:\n"," pass\n"," elif f.endswith('.txt'):\n"," try:\n"," num = int(Path(f).stem)\n"," txt_numbers.add(num)\n"," except:\n"," pass\n","\n"," matched_pairs = len(image_numbers & txt_numbers)\n"," print(f\" ๐ Matched image-txt pairs: {matched_pairs}\")\n"," print(f\" ๐พ Size: {os.path.getsize(output_zip) / (1024*1024):.1f} MB\")\n","\n","print(f\"\\n๐ Process completed!\")\n","print(f\"๐ Final output: {output_zip}\")\n","if combined_zip_created:\n"," print(\"โ
All original zip files have been removed.\")\n","else:\n"," print(\"โ ๏ธ Original files preserved due to error.\")"]},{"cell_type":"markdown","source":["Auto-Disconnect From Drive โฌ๏ธ"],"metadata":{"id":"UlO9vI5dxvdG"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"1JlaBNIKODCT"},"outputs":[],"source":["from google.colab import runtime\n","runtime.unassign()\n","\n","\n"]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1773754822000},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1765929505338},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1764859930834},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1763768252838},{"file_id":"1IQqB9joHm5EtmXbuQBXqydFuiBoAwiQ-","timestamp":1763755654136},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1762634251304},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1762004412712},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1761124521078},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760628088876},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://hugg
ingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}