Datasets:
Tags:
Not-For-All-Audiences
Upload civit_dataset_to_latent.ipynb
Browse files
civit_dataset_to_latent.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"cells":[{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"wKPi1UnSSmxl","executionInfo":{"status":"ok","timestamp":1768930656484,"user_tz":-60,"elapsed":15191,"user":{"displayName":"No Name","userId":"10578412414437288386"}},"outputId":"c29e42d3-ea2e-42fb-caf3-b872f6af850c"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]},{"cell_type":"code","source":["!pip install -U \\\n"," diffusers \\\n"," transformers \\\n"," accelerate \\\n"," safetensors \\\n"," huggingface_hub \\\n"," sentencepiece \\\n"," torchvision\n"],"metadata":{"id":"eq88iplBVYnk"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import zipfile, os\n","\n","zip_path = \"/content/kaggleset.zip\"\n","extract_path = \"/content\"\n","\n","with zipfile.ZipFile(zip_path, 'r') as z:\n"," z.extractall(extract_path)\n","\n","print(\"Extracted files:\", len(os.listdir(\"/content\")))\n"],"metadata":{"id":"1-RWlSYmS-c2"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import torch\n","from diffusers import AutoencoderKL\n","from PIL import Image\n","from torchvision import transforms\n","from tqdm import tqdm\n","import os\n","\n","device = \"cuda\"\n","\n","vae_id = \"black-forest-labs/FLUX.2-klein-4B\"\n","\n","vae = AutoencoderKL.from_pretrained(\n"," vae_id,\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float16\n",").to(device).eval()\n","\n","image_transform = transforms.Compose([\n"," transforms.Resize((1024,1024)),\n"," transforms.ToTensor(),\n"," transforms.Normalize([0.5],[0.5])\n","])\n","\n","latents = {}\n","image_files = sorted([f for f in os.listdir(\"/content\") if f.endswith(\".jpeg\")])\n","\n","with torch.no_grad():\n"," for f in tqdm(image_files):\n"," img = Image.open(f\"/content/{f}\").convert(\"RGB\")\n"," t = image_transform(img).unsqueeze(0).to(device, dtype=torch.float16)\n"," latent = vae.encode(t).latent_dist.sample() * 0.18215\n"," latents[f.replace(\".jpeg\",\"\")] = latent.cpu()\n","\n","print(\"Encoded images:\", len(latents))\n"],"metadata":{"collapsed":true,"id":"otH_Ox3ITOHO"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import gc, torch\n","\n","#del vae\n","torch.cuda.empty_cache()\n","gc.collect()\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"LRInhR1STlEM","executionInfo":{"status":"ok","timestamp":1768929524490,"user_tz":-60,"elapsed":1641,"user":{"displayName":"No Name","userId":"10578412414437288386"}},"outputId":"94e47dc3-2e3e-44b2-fb21-39adbf457a36"},"execution_count":9,"outputs":[{"output_type":"execute_result","data":{"text/plain":["209"]},"metadata":{},"execution_count":9}]},{"cell_type":"code","source":["from safetensors.torch import save_file\n","\n","latent_save = {}\n","for k, v in latents.items():\n"," latent_save[f\"latent.{k}\"] = v\n","\n","latent_path = \"/content/drive/MyDrive/flux_latents.safetensors\"\n","save_file(latent_save, latent_path)\n","\n","print(\"Saved latents to:\", latent_path)\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"KrJ4s_9JctHc","executionInfo":{"status":"ok","timestamp":1768930101404,"user_tz":-60,"elapsed":1112,"user":{"displayName":"No Name","userId":"10578412414437288386"}},"outputId":"5cd4eee5-4626-4477-c671-1ec52c925ae1"},"execution_count":10,"outputs":[{"output_type":"stream","name":"stdout","text":["Saved latents to: /content/drive/MyDrive/flux_latents.safetensors\n"]}]},{"cell_type":"code","source":["import json, os\n","\n","caption_map = {}\n","\n","for f in os.listdir(\"/content\"):\n"," if f.endswith(\".txt\"):\n"," key = f.replace(\".txt\",\"\")\n"," with open(f\"/content/{f}\") as fp:\n"," caption_map[key] = fp.read().strip()\n","\n","json_path = \"/content/drive/MyDrive/flux_captions.json\"\n","\n","with open(json_path, \"w\") as f:\n"," json.dump(caption_map, f, indent=2)\n","\n","print(\"Saved captions to:\", json_path)\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"dy4UWvSkc6nc","executionInfo":{"status":"ok","timestamp":1768930133737,"user_tz":-60,"elapsed":22,"user":{"displayName":"No Name","userId":"10578412414437288386"}},"outputId":"de13cf1f-c732-4d87-c27e-138041d11945"},"execution_count":11,"outputs":[{"output_type":"stream","name":"stdout","text":["Saved captions to: /content/drive/MyDrive/flux_captions.json\n"]}]},{"cell_type":"code","source":["!pip install -U diffusers accelerate safetensors peft\n"],"metadata":{"id":"1Q5eLbFEdDe0"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import json, torch\n","from diffusers import FluxPipeline\n","from tqdm import tqdm\n","\n","device = \"cuda\" # DiT later\n","text_device = \"cpu\" # keep encoders on CPU to save VRAM\n","dtype = torch.float16\n","\n","# ------------------\n","# Load captions\n","# ------------------\n","\n","caption_path = \"/content/drive/MyDrive/flux_captions.json\"\n","\n","with open(caption_path) as f:\n"," captions = json.load(f)\n","\n","keys = list(captions.keys())\n","\n","# ------------------\n","# Load pipeline\n","# ------------------\n","\n","pipe = FluxPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-4B\",\n"," torch_dtype=dtype\n",")\n","\n","pipe.text_encoder.to(text_device).eval()\n","pipe.text_encoder_2.to(text_device).eval()\n","\n","tokenizer = pipe.tokenizer\n","tokenizer_2 = pipe.tokenizer_2\n","\n","# ------------------\n","# Encode once\n","# ------------------\n","\n","text_cache = {}\n","\n","with torch.no_grad():\n"," for k in tqdm(keys):\n"," caption = captions[k]\n","\n"," t1 = tokenizer(\n"," caption,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=256,\n"," return_tensors=\"pt\"\n"," ).to(text_device)\n","\n"," t2 = tokenizer_2(\n"," caption,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=256,\n"," return_tensors=\"pt\"\n"," ).to(text_device)\n","\n"," e1 = pipe.text_encoder(**t1).last_hidden_state.cpu()\n"," e2 = pipe.text_encoder_2(**t2).last_hidden_state.cpu()\n","\n"," text_cache[k] = (e1, e2)\n","\n","print(\"Cached text embeddings:\", len(text_cache))\n"],"metadata":{"id":"wZVsUBeqe92P"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import gc, torch\n","\n","del pipe.text_encoder, pipe.text_encoder_2, tokenizer, tokenizer_2\n","torch.cuda.empty_cache()\n","gc.collect()\n"],"metadata":{"id":"Cn4nOkayfW9D"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import torch, json, random\n","from torch.utils.data import Dataset, DataLoader\n","from safetensors.torch import load_file\n","from diffusers import FluxPipeline\n","from peft import LoraConfig, get_peft_model\n","from tqdm import tqdm\n","\n","device = \"cuda\"\n","dtype = torch.float16\n","\n","# -----------------------\n","# Load latents\n","# -----------------------\n","\n","latent_path = \"/content/drive/MyDrive/flux_latents.safetensors\"\n","latents = load_file(latent_path)\n","\n","# -----------------------\n","# Use cached text\n","# -----------------------\n","\n","keys = list(text_cache.keys())\n","\n","# -----------------------\n","# Dataset\n","# -----------------------\n","\n","class FluxLatentDataset(Dataset):\n"," def __len__(self):\n"," return len(keys)\n","\n"," def __getitem__(self, idx):\n"," k = keys[idx]\n"," return latents[f\"latent.{k}\"], text_cache[k]\n","\n","dataset = FluxLatentDataset()\n","loader = DataLoader(dataset, batch_size=1, shuffle=True)\n","\n","# -----------------------\n","# Load pipeline (no text encoders needed)\n","# -----------------------\n","\n","pipe = FluxPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-4B\",\n"," torch_dtype=dtype\n",")\n","\n","pipe.transformer.to(device)\n","pipe.vae = None\n","\n","# -----------------------\n","# Inject LoRA\n","# -----------------------\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=16,\n"," target_modules=[\n"," \"to_q\", \"to_k\", \"to_v\", \"to_out\",\n"," \"fc1\", \"fc2\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","pipe.transformer = get_peft_model(pipe.transformer, lora_config)\n","pipe.transformer.train()\n","\n","optimizer = torch.optim.AdamW(pipe.transformer.parameters(), lr=1e-4)\n","\n","# -----------------------\n","# Training loop\n","# -----------------------\n","\n","steps = 800\n","\n","for step in range(steps):\n"," latent, (enc1, enc2) = random.choice(dataset)\n","\n"," latent = latent.to(device)\n"," enc1 = enc1.to(device, dtype)\n"," enc2 = enc2.to(device, dtype)\n","\n"," noise = torch.randn_like(latent)\n"," t = torch.randint(0, pipe.scheduler.config.num_train_timesteps, (1,), device=device)\n","\n"," noisy_latent = pipe.scheduler.add_noise(latent, noise, t)\n","\n"," pred = pipe.transformer(\n"," noisy_latent,\n"," timestep=t,\n"," encoder_hidden_states=enc1,\n"," encoder_hidden_states_2=enc2\n"," ).sample\n","\n"," loss = torch.nn.functional.mse_loss(pred, noise)\n","\n"," loss.backward()\n"," optimizer.step()\n"," optimizer.zero_grad()\n","\n"," if step % 50 == 0:\n"," print(f\"Step {step} | Loss {loss.item():.4f}\")\n","\n","# -----------------------\n","# Save LoRA\n","# -----------------------\n","\n","lora_out = \"/content/drive/MyDrive/flux_klein_lora.safetensors\"\n","pipe.transformer.save_pretrained(lora_out)\n","print(\"Saved LoRA to:\", lora_out)\n"],"metadata":{"id":"4s8bVhR7gIU9"},"execution_count":null,"outputs":[]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_hf_dataset.ipynb","timestamp":1768926774209},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1768857760851},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1763646205520},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0}
|