Datasets:
Tags:
Not-For-All-Audiences
Upload civit_dataset_to_latent.ipynb
Browse files
civit_dataset_to_latent.ipynb
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
{"cells":[{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')\n"],"metadata":{"id":"wKPi1UnSSmxl"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Step 1: Clean up any old torch (you already have this, but safe to re-run)\n","!pip uninstall -y torch torchvision torchaudio diffusers\n","\n","# Step 2: Install compatible torch for P100\n","!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n","\n","# Step 3: Upgrade diffusers VERY forcefully (ignore cache, upgrade deps)\n","!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n","\n","# Step 4: If the above still doesn't bring in Flux2KleinPipeline, install directly from GitHub main branch (latest dev version)\n","!pip install git+https://github.com/huggingface/diffusers.git\n","\n","# ββ Verification cell βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n","import torch\n","import diffusers\n","\n","print(\"PyTorch version: \", torch.__version__)\n","print(\"Diffusers version: \", diffusers.__version__) # MUST be something like 0.33.0.dev0 or 0.34+ after git install\n","print(\"CUDA available: \", torch.cuda.is_available())\n","print(\"GPU: \", torch.cuda.get_device_name(0) if torch.cuda.is_available() else \"No GPU\")\n","print(\"Compute capability: \", torch.cuda.get_device_capability(0) if torch.cuda.is_available() else \"N/A\")\n","\n","# Test import immediately\n","try:\n"," from diffusers import Flux2KleinPipeline\n"," print(\"SUCCESS: Flux2KleinPipeline imported correctly!\")\n","except ImportError as e:\n"," print(\"Import still failed:\", e)"],"metadata":{"id":"eq88iplBVYnk"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import zipfile, os\n","\n","zip_path = \"/content/drive/MyDrive/kaggleset.zip\"\n","extract_path = \"/content\"\n","\n","with zipfile.ZipFile(zip_path, 'r') as 
z:\n"," z.extractall(extract_path)\n","\n","print(\"Extracted files:\", len(os.listdir(\"/content\")))\n"],"metadata":{"id":"1-RWlSYmS-c2"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ββ FLUX Klein VAE-only latent encoding (Colab safe) ββββββββββββββββββββββ\n","\n","import os\n","import torch\n","from PIL import Image\n","from torchvision import transforms\n","from tqdm import tqdm\n","from safetensors.torch import save_file\n","\n","from diffusers import AutoencoderKLFlux2\n","\n","# ββ Config βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","dtype = torch.float16\n","\n","IMAGE_DIR = \"/content\"\n","SAVE_PATH = \"/content/drive/MyDrive/flux_latents.safetensors\"\n","MODEL_ID = \"black-forest-labs/FLUX.2-klein-4B\"\n","\n","# ββ Load ONLY the FLUX VAE ββββββββββββββββββββββββββββββββββββββββββββββββ\n","vae = AutoencoderKLFlux2.from_pretrained(\n"," MODEL_ID,\n"," subfolder=\"vae\",\n"," torch_dtype=dtype,\n"," low_cpu_mem_usage=True,\n",").to(device).eval()\n","\n","print(\"β
Loaded FLUX Klein VAE only\")\n","print(\"VRAM allocated (GB):\", round(torch.cuda.memory_allocated() / 1e9, 2))\n","\n","# ββ Image preprocessing (FLUX expects [-1, 1]) βββββββββββββββββββββββββββ\n","image_transform = transforms.Compose([\n"," transforms.Resize((1024, 1024)),\n"," transforms.ToTensor(),\n"," transforms.Normalize([0.5], [0.5]),\n","])\n","\n","# ββ Encode images βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n","latents = {}\n","image_files = sorted(\n"," f for f in os.listdir(IMAGE_DIR) if f.lower().endswith(\".jpeg\")\n",")\n","\n","with torch.no_grad():\n"," for i, f in enumerate(tqdm(image_files, desc=\"Encoding images\")):\n"," img = Image.open(os.path.join(IMAGE_DIR, f)).convert(\"RGB\")\n"," x = image_transform(img).unsqueeze(0).to(device, dtype=dtype)\n","\n"," z = vae.encode(x).latent_dist.sample() # NO scaling for FLUX\n"," latents[f.replace(\".jpeg\", \"\")] = z.cpu()\n","\n"," # One-time sanity check\n"," if i == 0:\n"," print(\"Latent shape:\", z.shape) # MUST be [1, 32, 128, 128]\n"," print(\"Latent dtype:\", z.dtype)\n","\n","# ββ Save to safetensors βββββββββββββββββββββββββββββββββββββββββββββββββββ\n","latent_save = {f\"{k}\": v for k, v in latents.items()}\n","save_file(latent_save, SAVE_PATH)\n","\n","print(f\"β
Encoded {len(latents)} images\")\n","print(\"✅ 
Saved latents to:\", SAVE_PATH)\n"],"metadata":{"collapsed":true,"id":"otH_Ox3ITOHO"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import json, os\n","\n","caption_map = {}\n","\n","for f in os.listdir(\"/content\"):\n"," if f.endswith(\".txt\"):\n"," key = f.replace(\".txt\",\"\")\n"," with open(f\"/content/{f}\") as fp:\n"," caption_map[key] = fp.read().strip()\n","\n","json_path = \"/content/drive/MyDrive/flux_captions.json\"\n","\n","with open(json_path, \"w\") as f:\n"," json.dump(caption_map, f, indent=2)\n","\n","print(\"Saved captions to:\", json_path)\n"],"metadata":{"id":"dy4UWvSkc6nc"},"execution_count":null,"outputs":[]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_latent.ipynb","timestamp":1768944855259},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_hf_dataset.ipynb","timestamp":1768926774209},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1768857760851},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1763646205520},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to 
WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0}
|
|
|
|
| 1 |
+
{"cells":[{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')\n"],"metadata":{"id":"wKPi1UnSSmxl"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Step 1: Clean up any old torch (you already have this, but safe to re-run)\n","!pip uninstall -y torch torchvision torchaudio diffusers\n","\n","# Step 2: Install compatible torch for P100\n","!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n","\n","# Step 3: Upgrade diffusers VERY forcefully (ignore cache, upgrade deps)\n","!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n","\n","# Step 4: If the above still doesn't bring in Flux2KleinPipeline, install directly from GitHub main branch (latest dev version)\n","!pip install git+https://github.com/huggingface/diffusers.git\n","\n","# ββ Verification cell βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n","import torch\n","import diffusers\n","\n","print(\"PyTorch version: \", torch.__version__)\n","print(\"Diffusers version: \", diffusers.__version__) # MUST be something like 0.33.0.dev0 or 0.34+ after git install\n","print(\"CUDA available: \", torch.cuda.is_available())\n","print(\"GPU: \", torch.cuda.get_device_name(0) if torch.cuda.is_available() else \"No GPU\")\n","print(\"Compute capability: \", torch.cuda.get_device_capability(0) if torch.cuda.is_available() else \"N/A\")\n","\n","# Test import immediately\n","try:\n"," from diffusers import Flux2KleinPipeline\n"," print(\"SUCCESS: Flux2KleinPipeline imported correctly!\")\n","except ImportError as e:\n"," print(\"Import still failed:\", e)"],"metadata":{"id":"eq88iplBVYnk"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import zipfile, os\n","\n","zip_path = \"/content/drive/MyDrive/kaggleset.zip\"\n","extract_path = \"/content\"\n","\n","with zipfile.ZipFile(zip_path, 'r') as 
z:\n"," z.extractall(extract_path)\n","\n","print(\"Extracted files:\", len(os.listdir(\"/content\")))\n"],"metadata":{"id":"1-RWlSYmS-c2"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ββ FLUX.2 Klein β VAE-only latent encoding (NO PIPELINE LOAD) ββββββββββββ\n","\n","import os, torch\n","from PIL import Image\n","from torchvision import transforms\n","from tqdm import tqdm\n","from safetensors.torch import save_file\n","from diffusers import AutoencoderKLFlux2\n","\n","# ββ Config βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n","device = \"cuda\"\n","dtype = torch.float16\n","\n","IMAGE_DIR = \"/content\"\n","SAVE_PATH = \"/content/drive/MyDrive/flux_latents_klein.safetensors\"\n","MODEL_ID = \"black-forest-labs/FLUX.2-klein-4B\"\n","\n","# ββ Load ONLY the VAE submodule βββββββββββββββββββββββββββββββββββββββββββ\n","\n","vae = AutoencoderKLFlux2.from_pretrained(\n"," MODEL_ID,\n"," subfolder=\"vae\",\n"," torch_dtype=dtype\n",").to(device).eval()\n","\n","print(\"β
Loaded FLUX Klein VAE only\")\n","print(\"VRAM (GB):\", round(torch.cuda.memory_allocated() / 1e9, 2))\n","\n","# ββ Image preprocessing (FLUX expects [-1, 1]) ββββββββββββββββββββββββββββ\n","\n","image_transform = transforms.Compose([\n"," transforms.Resize((1024, 1024)),\n"," transforms.ToTensor(),\n"," transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n","])\n","\n","# ββ Encode images βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n","\n","latents = {}\n","image_files = sorted(f for f in os.listdir(IMAGE_DIR) if f.lower().endswith(\".jpeg\"))\n","\n","with torch.no_grad():\n"," for i, f in enumerate(tqdm(image_files, desc=\"Encoding images\")):\n"," img = Image.open(os.path.join(IMAGE_DIR, f)).convert(\"RGB\")\n"," x = image_transform(img).unsqueeze(0).to(device, dtype=dtype)\n","\n"," z = vae.encode(x).latent_dist.sample() # β οΈ NO scaling for FLUX\n"," latents[f.replace(\".jpeg\", \"\")] = z.cpu()\n","\n"," if i == 0:\n"," print(\"Latent shape:\", z.shape) # β
should be [1, 32, 128, 128]\n","            print(\"Latent dtype:\", z.dtype)\n","\n","# ── Save latents ──────────────────────────────────────────────────────────\n","\n","save_file(latents, SAVE_PATH)\n","print(f\"✅ 
Saved {len(latents)} latents to:\", SAVE_PATH)\n"],"metadata":{"collapsed":true,"id":"otH_Ox3ITOHO"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import json, os\n","\n","caption_map = {}\n","\n","for f in os.listdir(\"/content\"):\n"," if f.endswith(\".txt\"):\n"," key = f.replace(\".txt\",\"\")\n"," with open(f\"/content/{f}\") as fp:\n"," caption_map[key] = fp.read().strip()\n","\n","json_path = \"/content/drive/MyDrive/flux_captions.json\"\n","\n","with open(json_path, \"w\") as f:\n"," json.dump(caption_map, f, indent=2)\n","\n","print(\"Saved captions to:\", json_path)\n"],"metadata":{"id":"dy4UWvSkc6nc"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["from safetensors.torch import load_file\n","\n","model = load_file(\"/content/flux_klein_lora_final.safetensors\")\n","\n","for key in model: print(f'the {key} has the shape {model[key].shape} and the dtype {model[key].dtype}')"],"metadata":{"collapsed":true,"id":"2YNXfJ65K9_R"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Proper trained Klein LoRa layers (for reference)\n","from safetensors.torch import load_file\n","\n","model = load_file(\"/content/flux-spritesheet-lora.safetensors\")\n","\n","for key in model: print(f'in the valid model {key} has the shape {model[key].shape} and the dtype {model[key].dtype}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"-9ulbToVPoRW","executionInfo":{"status":"ok","timestamp":1768982015152,"user_tz":-60,"elapsed":40,"user":{"displayName":"No Name","userId":"10578412414437288386"}},"outputId":"82c170dd-57fe-48d0-d4b2-c8e5c0a64bef"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["in the valid model base_model.model.double_blocks.0.img_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.0.img_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype 
torch.float32\n","in the valid model base_model.model.double_blocks.0.img_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.0.img_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.0.txt_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.0.txt_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.0.txt_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.0.txt_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.img_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.img_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.img_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.img_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.txt_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.txt_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.1.txt_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model 
base_model.model.double_blocks.1.txt_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.img_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.img_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.img_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.img_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.txt_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.txt_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.txt_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.2.txt_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.img_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.img_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.img_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.img_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.txt_attn.proj.lora_A.weight has the 
shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.txt_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.txt_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.3.txt_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.img_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.img_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.img_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.img_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.txt_attn.proj.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.txt_attn.proj.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.txt_attn.qkv.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_blocks.4.txt_attn.qkv.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.double_stream_modulation_img.lin.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_stream_modulation_img.lin.lora_B.weight has the shape torch.Size([18432, 16]) and the dtype 
torch.float32\n","in the valid model base_model.model.double_stream_modulation_txt.lin.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.double_stream_modulation_txt.lin.lora_B.weight has the shape torch.Size([18432, 16]) and the dtype torch.float32\n","in the valid model base_model.model.final_layer.linear.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.final_layer.linear.lora_B.weight has the shape torch.Size([128, 16]) and the dtype torch.float32\n","in the valid model base_model.model.img_in.lora_A.weight has the shape torch.Size([16, 128]) and the dtype torch.float32\n","in the valid model base_model.model.img_in.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.0.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.0.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.0.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.0.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.1.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.1.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.1.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.1.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in 
the valid model base_model.model.single_blocks.10.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.10.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.10.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.10.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.11.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.11.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.11.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.11.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.12.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.12.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.12.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.12.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.13.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.13.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the 
dtype torch.float32\n","in the valid model base_model.model.single_blocks.13.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.13.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.14.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.14.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.14.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.14.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.15.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.15.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.15.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.15.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.16.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.16.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.16.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.16.linear2.lora_B.weight has the shape 
torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.17.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.17.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.17.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.17.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.18.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.18.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.18.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.18.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.19.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.19.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.19.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.19.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.2.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model 
base_model.model.single_blocks.2.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.2.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.2.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.3.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.3.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.3.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.3.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.4.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.4.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.4.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.4.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.5.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.5.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.5.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the 
valid model base_model.model.single_blocks.5.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.6.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.6.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.6.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.6.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.7.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.7.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.7.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.7.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.8.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.8.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.8.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.8.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.9.linear1.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype 
torch.float32\n","in the valid model base_model.model.single_blocks.9.linear1.lora_B.weight has the shape torch.Size([27648, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.9.linear2.lora_A.weight has the shape torch.Size([16, 12288]) and the dtype torch.float32\n","in the valid model base_model.model.single_blocks.9.linear2.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.single_stream_modulation.lin.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.single_stream_modulation.lin.lora_B.weight has the shape torch.Size([9216, 16]) and the dtype torch.float32\n","in the valid model base_model.model.time_in.in_layer.lora_A.weight has the shape torch.Size([16, 256]) and the dtype torch.float32\n","in the valid model base_model.model.time_in.in_layer.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.time_in.out_layer.lora_A.weight has the shape torch.Size([16, 3072]) and the dtype torch.float32\n","in the valid model base_model.model.time_in.out_layer.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n","in the valid model base_model.model.txt_in.lora_A.weight has the shape torch.Size([16, 7680]) and the dtype torch.float32\n","in the valid model base_model.model.txt_in.lora_B.weight has the shape torch.Size([3072, 16]) and the dtype torch.float32\n"]}]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_latent.ipynb","timestamp":1768944855259},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_hf_dataset.ipynb","timestamp":1768926774209},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to 
WebP.ipynb","timestamp":1768857760851},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1763646205520},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0}
|