codeShare committed on
Commit
1d6511f
·
verified ·
1 Parent(s): 716f095

Upload civit_caption_prepper.ipynb

Browse files
Files changed (1) hide show
  1. civit_caption_prepper.ipynb +1 -1
civit_caption_prepper.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells":[{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"hDzdSOe90dAa"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@title WD Tagger + Loli Filter + Caption Cleaner + Tag Spreading β†’ Drive { run: \"auto\" }\n","from google.colab import drive\n","import os\n","import zipfile\n","from pathlib import Path\n","import shutil\n","from tqdm.auto import tqdm\n","import re\n","!pip install timm pillow pandas requests nltk -q\n","\n","import timm\n","import torch\n","from PIL import Image\n","import torchvision.transforms as transforms\n","import pandas as pd\n","import requests\n","from io import StringIO\n","import nltk\n","from collections import defaultdict\n","\n","# Fix for recent NLTK versions\n","nltk.download('punkt', quiet=True)\n","nltk.download('punkt_tab', quiet=True)\n","\n","# ────────────────────────────────────────────────\n","#@markdown ### Settings\n","drive.mount('/content/drive', force_remount=False)\n","\n","zip_path = \"/content/drive/MyDrive/vertical_slices_output/vertical_slices.zip\" #@param {type:\"string\"}\n","output_zip_name = \"cleaned_tagged_dataset.zip\" #@param {type:\"string\"}\n","output_folder_on_drive = \"/content/drive/MyDrive/Cleaned_Datasets\" #@param {type:\"string\"}\n","\n","case_sensitive_loli_check = False #@param {type:\"boolean\"}\n","tag_probability_threshold = 0.35 #@param {type:\"slider\", min:0.1, max:0.6, step:0.05}\n","\n","# ────────────────────────────────────────────────\n","if not zip_path or not os.path.isfile(zip_path):\n"," print(\"❌ Please provide a valid zip file path\")\n"," raise SystemExit\n","\n","print(f\"πŸ“¦ Input zip: {zip_path}\")\n","print(f\"πŸ“€ Will save: {output_folder_on_drive}/{output_zip_name}\\n\")\n","\n","# ────────────────────────────────────────────────\n","extract_dir = Path(\"/content/extracted\")\n","cleaned_dir = Path(\"/content/cleaned_dataset\")\n","\n","shutil.rmtree(extract_dir, 
ignore_errors=True)\n","shutil.rmtree(cleaned_dir, ignore_errors=True)\n","extract_dir.mkdir(exist_ok=True, parents=True)\n","cleaned_dir.mkdir(exist_ok=True, parents=True)\n","\n","print(\"πŸ“‚ Extracting archive...\")\n","with zipfile.ZipFile(zip_path, 'r') as zf:\n"," zf.extractall(extract_dir)\n","\n","# ────────────────────────────────────────────────\n","# Load WD tagger\n","print(\"πŸ”§ Loading WD tagger model...\")\n","tags_url = \"https://huggingface.co/SmilingWolf/wd-vit-tagger-v3/resolve/main/selected_tags.csv\"\n","tags_df = pd.read_csv(StringIO(requests.get(tags_url).text))\n","tags = tags_df['name'].tolist()\n","\n","model = timm.create_model(\"hf_hub:SmilingWolf/wd-vit-tagger-v3\", pretrained=True)\n","\n","# Define device properly (this fixes the .device attribute error)\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = model.eval().to(device)\n","\n","preprocess = transforms.Compose([\n"," transforms.Resize((448, 448)),\n"," transforms.ToTensor(),\n"," transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n","])\n","\n","def get_wd_tags(img_path):\n"," try:\n"," img = Image.open(img_path).convert(\"RGB\")\n"," x = preprocess(img).unsqueeze(0).to(device) # ← now using the correct device\n"," with torch.no_grad():\n"," logits = model(x)\n"," probs = torch.sigmoid(logits).squeeze(0).cpu().numpy()\n"," selected = [tags[i] for i, p in enumerate(probs) if p > tag_probability_threshold]\n"," return selected\n"," except Exception as e:\n"," print(f\" tagging failed: {img_path.name} β†’ {str(e)}\")\n"," return []\n","\n","# ────────────────────────────────────────────────\n","def clean_caption(text: str) -> str:\n"," if not text.strip():\n"," return \"\"\n"," text = re.sub(r'\\byoung girl\\b', 'young woman', text, flags=re.IGNORECASE)\n"," text = re.sub(r'\\bswastika\\b', 'manji', text, flags=re.IGNORECASE)\n"," text = re.sub(r'\\byoung\\b', '', text, flags=re.IGNORECASE)\n"," text = 
text.replace('*', '')\n"," text = re.sub(r'\\s+', ' ', text)\n"," text = text.replace('\\r\\n', ' ').replace('\\n', ' ').replace('\\r', ' ')\n"," return text.strip()\n","\n","def spread_tags_into_caption(caption: str, new_tags: list) -> str:\n"," if not new_tags:\n"," return clean_caption(caption)\n","\n"," base = clean_caption(caption)\n"," if not base:\n"," return \", \".join(new_tags)\n","\n"," sentences = nltk.sent_tokenize(base)\n"," if len(sentences) <= 1:\n"," return base + \"\\n\\n\" + \", \".join(new_tags)\n","\n"," # Distribute tags between sentences\n"," num_gaps = len(sentences) - 1\n"," tags_per_gap = max(1, len(new_tags) // num_gaps)\n"," extra = len(new_tags) % num_gaps\n","\n"," parts = []\n"," tag_idx = 0\n"," for i, sent in enumerate(sentences):\n"," parts.append(sent.strip())\n"," if i < num_gaps:\n"," cnt = tags_per_gap + (1 if i < extra else 0)\n"," if cnt > 0:\n"," group = new_tags[tag_idx : tag_idx + cnt]\n"," tag_idx += cnt\n"," parts.append(\", \".join(group))\n","\n"," # Remaining tags at the end\n"," if tag_idx < len(new_tags):\n"," parts.append(\", \".join(new_tags[tag_idx:]))\n","\n"," return \"\\n\".join(parts)\n","\n","# ────────────────────────────────────────────────\n","print(\"\\nπŸ” Processing files...\\n\")\n","\n","removed = 0\n","kept = 0\n","\n","groups = defaultdict(list)\n","for f in extract_dir.rglob(\"*\"):\n"," if f.is_file():\n"," groups[f.stem].append(f)\n","\n","for stem, files in tqdm(groups.items(), desc=\"Groups\"):\n"," imgs = [f for f in files if f.suffix.lower() in {'.jpg','.jpeg','.png','.webp','.gif','.bmp','.tiff'}]\n"," txts = [f for f in files if f.suffix.lower() == '.txt']\n","\n"," if not imgs:\n"," continue\n","\n"," img = imgs[0] # take the first image\n"," wd_tags = get_wd_tags(img)\n","\n"," has_loli = 'loli' in ((' '.join(wd_tags)).lower() if not case_sensitive_loli_check else ' '.join(wd_tags))\n","\n"," if has_loli:\n"," removed += 1\n"," for f in files:\n"," try: f.unlink()\n"," except: pass\n"," 
continue\n","\n"," # Keep this pair\n"," kept += 1\n","\n"," # Read original caption if exists\n"," orig_caption = \"\"\n"," if txts:\n"," try:\n"," orig_caption = txts[0].read_text(encoding=\"utf-8\", errors=\"replace\").strip()\n"," except:\n"," pass\n","\n"," # Create improved caption\n"," final_caption = spread_tags_into_caption(orig_caption, wd_tags)\n","\n"," # Copy files to cleaned folder (preserve subfolder structure)\n"," for f in files:\n"," rel = f.relative_to(extract_dir)\n"," dst = cleaned_dir / rel\n"," dst.parent.mkdir(parents=True, exist_ok=True)\n"," shutil.copy2(f, dst)\n","\n"," # Write new caption (overwrite or create .txt)\n"," txt_name = img.stem + \".txt\"\n"," txt_rel = img.relative_to(extract_dir).parent / txt_name\n"," txt_dst = cleaned_dir / txt_rel\n"," txt_dst.parent.mkdir(parents=True, exist_ok=True)\n"," with open(txt_dst, \"w\", encoding=\"utf-8\") as fw:\n"," fw.write(final_caption)\n","\n","print(f\"\\nβœ… Done processing\")\n","print(f\" Removed (loli detected): {removed}\")\n","print(f\" Kept & cleaned : {kept}\")\n","\n","# ────────────────────────────────────────────────\n","print(\"\\nπŸ—œοΈ Creating output zip...\")\n","\n","final_zip = Path(f\"/content/{output_zip_name}\")\n","\n","with zipfile.ZipFile(final_zip, \"w\", zipfile.ZIP_DEFLATED) as zf:\n"," for item in tqdm(cleaned_dir.rglob(\"*\"), desc=\"Zipping\"):\n"," if item.is_file():\n"," arc = item.relative_to(cleaned_dir)\n"," zf.write(item, arc)\n","\n","# ────────────────────────────────────────────────\n","print(\"\\nπŸ’Ύ Copying to Drive...\")\n","os.makedirs(output_folder_on_drive, exist_ok=True)\n","drive_dest = Path(output_folder_on_drive) / output_zip_name\n","shutil.copy2(final_zip, drive_dest)\n","\n","size_mb = final_zip.stat().st_size / (1024 * 1024)\n","print(f\"β†’ Saved: {drive_dest}\")\n","print(f\" Size: {size_mb:.1f} MiB\")\n","\n","# ────────────────────────────────────────────────\n","print(\"\\n🧹 Cleaning up temp 
folders...\")\n","shutil.rmtree(extract_dir, ignore_errors=True)\n","shutil.rmtree(cleaned_dir, ignore_errors=True)\n","\n","print(\"\\nAll finished βœ“\")"],"metadata":{"id":"wTXltVDGXX3z"},"execution_count":null,"outputs":[]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_caption_prepper.ipynb","timestamp":1773089575687},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_caption_prepper.ipynb","timestamp":1773080355474},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1772998638620},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1763646205520},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0}
 
1
+ {"cells":[{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"hDzdSOe90dAa"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@title WD Tagger + Loli Filter + Caption Cleaner + Tag Spreading β†’ Drive (single-line + comma spacing) { run: \"auto\" }\n","from google.colab import drive\n","import os\n","import zipfile\n","from pathlib import Path\n","import shutil\n","from tqdm.auto import tqdm\n","import re\n","!pip install timm pillow pandas requests nltk -q\n","\n","import timm\n","import torch\n","from PIL import Image\n","import torchvision.transforms as transforms\n","import pandas as pd\n","import requests\n","from io import StringIO\n","import nltk\n","from collections import defaultdict\n","\n","# Fix for recent NLTK versions\n","nltk.download('punkt', quiet=True)\n","nltk.download('punkt_tab', quiet=True)\n","\n","# ────────────────────────────────────────────────\n","#@markdown ### Settings\n","drive.mount('/content/drive', force_remount=False)\n","\n","zip_path = \"/content/drive/MyDrive/TA-2026-03-09-21-25-46-947477596164084244.zip\" #@param {type:\"string\"}\n","output_zip_name = \"cleaned_tagged_dataset.zip\" #@param {type:\"string\"}\n","output_folder_on_drive = \"/content/drive/MyDrive/Cleaned_Datasets\" #@param {type:\"string\"}\n","\n","case_sensitive_loli_check = False #@param {type:\"boolean\"}\n","tag_probability_threshold = 0.35 #@param {type:\"slider\", min:0.1, max:0.6, step:0.05}\n","\n","# ────────────────────────────────────────────────\n","if not zip_path or not os.path.isfile(zip_path):\n"," print(\"❌ Please provide a valid zip file path\")\n"," raise SystemExit\n","\n","print(f\"πŸ“¦ Input zip: {zip_path}\")\n","print(f\"πŸ“€ Will save: {output_folder_on_drive}/{output_zip_name}\\n\")\n","\n","# ────────────────────────────────────────────────\n","extract_dir = Path(\"/content/extracted\")\n","cleaned_dir = 
Path(\"/content/cleaned_dataset\")\n","\n","shutil.rmtree(extract_dir, ignore_errors=True)\n","shutil.rmtree(cleaned_dir, ignore_errors=True)\n","extract_dir.mkdir(exist_ok=True, parents=True)\n","cleaned_dir.mkdir(exist_ok=True, parents=True)\n","\n","print(\"πŸ“‚ Extracting archive...\")\n","with zipfile.ZipFile(zip_path, 'r') as zf:\n"," zf.extractall(extract_dir)\n","\n","# ────────────────────────────────────────────────\n","# Load WD tagger\n","print(\"πŸ”§ Loading WD tagger model...\")\n","tags_url = \"https://huggingface.co/SmilingWolf/wd-vit-tagger-v3/resolve/main/selected_tags.csv\"\n","tags_df = pd.read_csv(StringIO(requests.get(tags_url).text))\n","tags = tags_df['name'].tolist()\n","\n","model = timm.create_model(\"hf_hub:SmilingWolf/wd-vit-tagger-v3\", pretrained=True)\n","\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = model.eval().to(device)\n","\n","preprocess = transforms.Compose([\n"," transforms.Resize((448, 448)),\n"," transforms.ToTensor(),\n"," transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n","])\n","\n","def get_wd_tags(img_path):\n"," try:\n"," img = Image.open(img_path).convert(\"RGB\")\n"," x = preprocess(img).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," logits = model(x)\n"," probs = torch.sigmoid(logits).squeeze(0).cpu().numpy()\n"," selected = [tags[i] for i, p in enumerate(probs) if p > tag_probability_threshold]\n"," return selected\n"," except Exception as e:\n"," print(f\" tagging failed: {img_path.name} β†’ {str(e)}\")\n"," return []\n","\n","# ───────��────────────────────────────────────────\n","def clean_caption(text: str) -> str:\n"," if not text.strip():\n"," return \"\"\n"," text = re.sub(r'\\byoung girl\\b', 'young woman', text, flags=re.IGNORECASE)\n"," text = re.sub(r'\\bswastika\\b', 'manji', text, flags=re.IGNORECASE)\n"," text = re.sub(r'\\byoung\\b', '', text, flags=re.IGNORECASE)\n"," text = text.replace('*', '')\n"," text = 
re.sub(r'\\s+', ' ', text)\n"," text = text.replace('\\r\\n', ' ').replace('\\n', ' ').replace('\\r', ' ')\n"," return text.strip()\n","\n","def spread_tags_into_caption(caption: str, new_tags: list) -> str:\n"," if not new_tags:\n"," cleaned = clean_caption(caption)\n"," return ' , '.join(cleaned.split(',')).strip()\n","\n"," base = clean_caption(caption)\n"," if not base:\n"," return ' , '.join(new_tags)\n","\n"," sentences = nltk.sent_tokenize(base)\n"," if len(sentences) <= 1:\n"," combined = base + \" \" + \" , \".join(new_tags)\n"," else:\n"," # Distribute tags between sentences\n"," num_gaps = len(sentences) - 1\n"," tags_per_gap = max(1, len(new_tags) // num_gaps)\n"," extra = len(new_tags) % num_gaps\n","\n"," parts = []\n"," tag_idx = 0\n"," for i, sent in enumerate(sentences):\n"," parts.append(sent.strip())\n"," if i < num_gaps:\n"," cnt = tags_per_gap + (1 if i < extra else 0)\n"," if cnt > 0:\n"," group = new_tags[tag_idx : tag_idx + cnt]\n"," tag_idx += cnt\n"," parts.append(\" , \".join(group))\n","\n"," # Remaining tags\n"," if tag_idx < len(new_tags):\n"," parts.append(\" , \".join(new_tags[tag_idx:]))\n","\n"," combined = \" \".join(parts)\n","\n"," # Final cleanup: ensure exactly one space after each comma\n"," combined = re.sub(r'\\s*,\\s*', ' , ', combined)\n"," combined = re.sub(r'\\s+', ' ', combined).strip()\n","\n"," return combined\n","\n","# ────────────────────────────────────────────────\n","print(\"\\nπŸ” Processing files...\\n\")\n","\n","removed = 0\n","kept = 0\n","\n","groups = defaultdict(list)\n","for f in extract_dir.rglob(\"*\"):\n"," if f.is_file():\n"," groups[f.stem].append(f)\n","\n","for stem, files in tqdm(groups.items(), desc=\"Groups\"):\n"," imgs = [f for f in files if f.suffix.lower() in {'.jpg','.jpeg','.png','.webp','.gif','.bmp','.tiff'}]\n"," txts = [f for f in files if f.suffix.lower() == '.txt']\n","\n"," if not imgs:\n"," continue\n","\n"," img = imgs[0] # take the first image\n"," wd_tags = 
get_wd_tags(img)\n","\n"," has_loli = 'loli' in ((' '.join(wd_tags)).lower() if not case_sensitive_loli_check else ' '.join(wd_tags))\n","\n"," if has_loli:\n"," removed += 1\n"," for f in files:\n"," try: f.unlink()\n"," except: pass\n"," continue\n","\n"," # Keep this pair\n"," kept += 1\n","\n"," # Read original caption if exists\n"," orig_caption = \"\"\n"," if txts:\n"," try:\n"," orig_caption = txts[0].read_text(encoding=\"utf-8\", errors=\"replace\").strip()\n"," except:\n"," pass\n","\n"," # Create final single-line caption\n"," final_caption = spread_tags_into_caption(orig_caption, wd_tags)\n","\n"," # Copy files to cleaned folder (preserve subfolder structure)\n"," for f in files:\n"," rel = f.relative_to(extract_dir)\n"," dst = cleaned_dir / rel\n"," dst.parent.mkdir(parents=True, exist_ok=True)\n"," shutil.copy2(f, dst)\n","\n"," # Write new caption (single line, space after commas)\n"," txt_name = img.stem + \".txt\"\n"," txt_rel = img.relative_to(extract_dir).parent / txt_name\n"," txt_dst = cleaned_dir / txt_rel\n"," txt_dst.parent.mkdir(parents=True, exist_ok=True)\n"," with open(txt_dst, \"w\", encoding=\"utf-8\") as fw:\n"," fw.write(final_caption)\n","\n","print(f\"\\nβœ… Done processing\")\n","print(f\" Removed (loli detected): {removed}\")\n","print(f\" Kept & cleaned : {kept}\")\n","\n","# ────────────────────────────────────────────────\n","print(\"\\nπŸ—œοΈ Creating output zip...\")\n","\n","final_zip = Path(f\"/content/{output_zip_name}\")\n","\n","with zipfile.ZipFile(final_zip, \"w\", zipfile.ZIP_DEFLATED) as zf:\n"," for item in tqdm(cleaned_dir.rglob(\"*\"), desc=\"Zipping\"):\n"," if item.is_file():\n"," arc = item.relative_to(cleaned_dir)\n"," zf.write(item, arc)\n","\n","# ────────────────────────────────────────────────\n","print(\"\\nπŸ’Ύ Copying to Drive...\")\n","os.makedirs(output_folder_on_drive, exist_ok=True)\n","drive_dest = Path(output_folder_on_drive) / output_zip_name\n","shutil.copy2(final_zip, 
drive_dest)\n","\n","size_mb = final_zip.stat().st_size / (1024 * 1024)\n","print(f\"β†’ Saved: {drive_dest}\")\n","print(f\" Size: {size_mb:.1f} MiB\")\n","\n","# ────────────────────────────────────────────────\n","print(\"\\n🧹 Cleaning up temp folders...\")\n","shutil.rmtree(extract_dir, ignore_errors=True)\n","shutil.rmtree(cleaned_dir, ignore_errors=True)\n","\n","print(\"\\nAll finished βœ“\")"],"metadata":{"id":"2TZb6inbZTVX"},"execution_count":null,"outputs":[]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_caption_prepper.ipynb","timestamp":1773090196076},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_caption_prepper.ipynb","timestamp":1773089575687},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_caption_prepper.ipynb","timestamp":1773080355474},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1772998638620},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1763646205520},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to 
WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0}