codeShare committed on
Commit
d149d30
·
verified ·
1 Parent(s): e9fb384

Upload Deep Panel Manga Extract.ipynb

Browse files
Files changed (1) hide show
  1. Deep Panel Manga Extract.ipynb +1 -1
Deep Panel Manga Extract.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"G9yAxL_ViF7y"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["Extract panels from images using trained model"],"metadata":{"id":"XJvrb0cuY3P7"}},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"lMJtrtME8IVE"},"outputs":[],"source":["# Cell 1: Unzip the new dataset\n","#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/mia_clusters.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}\n","\n","# Cell 2: Setup and panel extraction\n","# Install required dependencies\n","!pip install tensorflow\n","!pip install opencv-python-headless\n","!pip install numpy\n","!pip install tqdm\n","!pip install datasets # For Hugging Face dataset\n","\n","# Clone the DeepPanel repository (for any required utilities)\n","!git clone https://github.com/pedrovgs/DeepPanel.git\n","%cd DeepPanel\n","\n","# Import necessary libraries\n","import os\n","import cv2\n","import numpy as np\n","import tensorflow as tf\n","from tqdm import tqdm\n","from google.colab import drive\n","from datasets import Dataset\n","import pandas as pd\n","import zipfile\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Verify dataset structure\n","!ls /content/content\n","!ls /content/content/clusters\n","# Verify a sample cluster (uncomment if needed for debugging)\n","# !ls /content/content/clusters/cluster_1\n","\n","# Define paths\n","model_path = '/content/drive/MyDrive/deeppanel_model.keras' # Updated to .keras\n","clusters_path = '/content/content/clusters'\n","output_dir = '/content/drive/MyDrive/extracted_panels'\n","zip_output_path = '/content/drive/MyDrive/extracted_panels.zip'\n","\n","# Create output directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","# Load the trained model\n","model = 
tf.keras.models.load_model(model_path)\n","\n","# Function to preprocess image for model input\n","def preprocess_image(image_path, target_size=(256, 256)):\n"," image = cv2.imread(image_path)\n"," if image is None:\n"," raise ValueError(f\"Failed to load image: {image_path}\")\n"," image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n"," original_shape = image.shape[:2]\n"," image = cv2.resize(image, target_size)\n"," image = image / 255.0 # Normalize to [0, 1]\n"," return image, original_shape\n","\n","# Function to post-process mask and extract panels\n","def extract_panels(image_path, mask, original_shape, output_dir, cluster_name, image_idx):\n"," # Resize mask to original image size\n"," mask = cv2.resize(mask, (original_shape[1], original_shape[0]), interpolation=cv2.INTER_NEAREST)\n"," mask = (mask > 0.5).astype(np.uint8) * 255 # Threshold to binary mask\n","\n"," # Find contours\n"," contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n","\n"," # Load original image for cropping\n"," original_image = cv2.imread(image_path)\n"," if original_image is None:\n"," raise ValueError(f\"Failed to load original image: {image_path}\")\n","\n"," # Create output directory for this cluster\n"," cluster_output_dir = os.path.join(output_dir, cluster_name)\n"," os.makedirs(cluster_output_dir, exist_ok=True)\n","\n"," # Extract panels\n"," for i, contour in enumerate(contours):\n"," # Get bounding box for each contour\n"," x, y, w, h = cv2.boundingRect(contour)\n"," # Skip small contours (optional, adjust threshold as needed)\n"," if w * h < 1000: # Ignore small regions (e.g., noise)\n"," continue\n"," # Crop panel from original image\n"," panel = original_image[y:y+h, x:x+w]\n"," # Save panel\n"," panel_path = os.path.join(cluster_output_dir, f'panel_{image_idx}_{i}.jpg')\n"," cv2.imwrite(panel_path, panel)\n","\n","# Option 1: Process images directly (if dataset is small)\n","def process_images_directly(clusters_path, model, output_dir):\n"," 
cluster_folders = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')]\n","\n"," for cluster_name in tqdm(cluster_folders, desc=\"Processing clusters\"):\n"," cluster_path = os.path.join(clusters_path, cluster_name)\n"," image_files = [f for f in os.listdir(cluster_path) if f.endswith(('.jpg', '.jpeg'))]\n","\n"," for idx, image_file in enumerate(tqdm(image_files, desc=f\"Processing {cluster_name}\")):\n"," image_path = os.path.join(cluster_path, image_file)\n"," try:\n"," # Preprocess image\n"," image, original_shape = preprocess_image(image_path)\n"," # Predict mask\n"," mask = model.predict(np.expand_dims(image, axis=0), verbose=0)[0]\n"," if mask.shape[-1] > 1: # Handle multi-class masks\n"," mask = np.argmax(mask, axis=-1) # Convert to single-channel\n"," else:\n"," mask = mask[..., 0] # Binary mask\n"," # Extract and save panels\n"," extract_panels(image_path, mask, original_shape, output_dir, cluster_name, idx)\n"," except Exception as e:\n"," print(f\"Error processing {image_path}: {e}\")\n","\n","# Option 2: Create Hugging Face dataset (for large datasets)\n","def create_hf_dataset(clusters_path):\n"," data = []\n"," cluster_folders = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')]\n","\n"," for cluster_name in cluster_folders:\n"," cluster_path = os.path.join(clusters_path, cluster_name)\n"," image_files = [f for f in os.listdir(cluster_path) if f.endswith(('.jpg', '.jpeg'))]\n"," for image_file in image_files:\n"," data.append({\n"," 'image_path': os.path.join(cluster_path, image_file),\n"," 'cluster': cluster_name\n"," })\n","\n"," df = pd.DataFrame(data)\n"," dataset = Dataset.from_pandas(df)\n"," return dataset\n","\n","# Choose processing method\n","use_hf_dataset = False # Set to True for large datasets\n","\n","if use_hf_dataset:\n"," # Process using Hugging Face dataset\n"," dataset = create_hf_dataset(clusters_path)\n"," for example in tqdm(dataset, desc=\"Processing images\"):\n"," image_path = 
example['image_path']\n"," cluster_name = example['cluster']\n"," try:\n"," image, original_shape = preprocess_image(image_path)\n"," mask = model.predict(np.expand_dims(image, axis=0), verbose=0)[0]\n"," if mask.shape[-1] > 1:\n"," mask = np.argmax(mask, axis=-1)\n"," else:\n"," mask = mask[..., 0]\n"," extract_panels(image_path, mask, original_shape, output_dir, cluster_name, example['index'])\n"," except Exception as e:\n"," print(f\"Error processing {image_path}: {e}\")\n","else:\n"," # Process directly\n"," process_images_directly(clusters_path, model, output_dir)\n","\n","# Cell 3: Zip and save extracted panels to Google Drive\n","def zip_directory(directory, zip_path):\n"," with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," for root, _, files in os.walk(directory):\n"," for file in files:\n"," file_path = os.path.join(root, file)\n"," arcname = os.path.relpath(file_path, directory)\n"," zipf.write(file_path, os.path.join('extracted_panels', arcname))\n","\n","# Create zip file\n","#zip_directory(output_dir, zip_output_path)\n","#print(f\"Extracted panels saved to {output_dir}\")\n","#print(f\"Zipped panels saved to {zip_output_path}\")\n","\n","# Optional: Debug a sample prediction\n","sample_cluster = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')][0]\n","sample_image = [f for f in os.listdir(os.path.join(clusters_path, sample_cluster)) if f.endswith(('.jpg', '.jpeg'))][0]\n","sample_image_path = os.path.join(clusters_path, sample_cluster, sample_image)\n","sample_image, original_shape = preprocess_image(sample_image_path)\n","sample_mask = model.predict(np.expand_dims(sample_image, axis=0), verbose=0)[0]\n","if sample_mask.shape[-1] > 1:\n"," sample_mask = np.argmax(sample_mask, axis=-1)\n","else:\n"," sample_mask = sample_mask[..., 0]\n","cv2.imwrite('/content/sample_pred_mask.png', sample_mask * 255)\n","print(\"Sample predicted mask saved to /content/sample_pred_mask.png\")"]},{"cell_type":"code","source":["import 
os\n","import zipfile\n","import random\n","from pathlib import Path\n","\n","# Define the root folder containing subfolders with images\n","root_folder = \"/content/drive/MyDrive/extracted_panels\"\n","\n","# Generate a random 5-digit number\n","random_number = f\"{random.randint(0, 99999):05d}\"\n","\n","# Define the output zip file path\n","zip_file_name = f\"/content/drive/MyDrive/training_data_{random_number}.zip\"\n","\n","# Create a zip file\n","with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," # Walk through all subfolders in the root folder\n"," for subfolder in Path(root_folder).glob(\"cluster_*\"):\n"," if subfolder.is_dir():\n"," # Iterate through all files in the subfolder\n"," for file_path in subfolder.glob(\"*\"):\n"," if file_path.is_file() and file_path.suffix.lower() in ('.jpg', '.jpeg', '.png', '.bmp', '.gif'):\n"," # Add the image to the zip file, preserving the subfolder structure\n"," zipf.write(file_path, arcname=file_path.relative_to(root_folder))\n","\n","print(f\"Images zipped successfully into {zip_file_name}\")"],"metadata":{"id":"rkuGaY8pX2bY"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Create numbered set for further clustering"],"metadata":{"id":"kFMxfm5RYvJt"}},{"cell_type":"code","source":["import os\n","import zipfile\n","import random\n","from pathlib import Path\n","\n","# Define the root folder containing subfolders with images\n","root_folder = \"/content/drive/MyDrive/extracted_panels\"\n","\n","# Generate a random 5-digit number\n","random_number = f\"{random.randint(0, 99999):05d}\"\n","\n","# Define the output zip file path\n","zip_file_name = f\"/content/drive/MyDrive/training_data_{random_number}.zip\"\n","\n","# Create a zip file\n","with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," # Walk through all subfolders in the root folder\n"," for subfolder in Path(root_folder).glob(\"cluster_*\"):\n"," if subfolder.is_dir():\n"," # Iterate 
through all files in the subfolder\n"," for file_path in subfolder.glob(\"*\"):\n"," if file_path.is_file() and file_path.suffix.lower() in ('.jpg', '.jpeg', '.png', '.bmp', '.gif'):\n"," # Add the image to the zip file, preserving the subfolder structure\n"," zipf.write(file_path, arcname=file_path.relative_to(root_folder))\n","\n","print(f\"Images zipped successfully into {zip_file_name}\")\n"],"metadata":{"id":"MR7C33klYD3E"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UPzIJ402VVMd"},"source":["Train a Deep Panel Model\n","\n","Refer to https://github.com/pedrovgs/DeepPanel in how to build a dataset"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"JFtsOMxP6dJ7"},"outputs":[],"source":["# Install required dependencies\n","!pip install tensorflow\n","!pip install opencv-python-headless\n","!pip install numpy\n","!pip install tqdm\n","\n","# Clone the DeepPanel repository\n","!git clone https://github.com/pedrovgs/DeepPanel.git\n","%cd DeepPanel\n","\n","# Import necessary libraries\n","import os\n","import zipfile\n","from google.colab import drive\n","import tensorflow as tf\n","import cv2\n","import numpy as np\n","from tqdm import tqdm\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Unzip the dataset\n","dataset_path = '/content/drive/MyDrive/mia_panel_dataset.zip'\n","extract_path = '/content/mia_panel_dataset'\n","\n","with zipfile.ZipFile(dataset_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_path)\n","\n","# Remove __MACOSX folder if it exists\n","!rm -rf /content/mia_panel_dataset/__MACOSX\n","\n","# Verify dataset structure\n","!ls /content/mia_panel_dataset\n","#!ls /content/mia_panel_dataset\n","# Verify number of files in training folders\n","!echo \"Training raw images:\"\n","!ls -l /content/mia_panel_dataset/training/raw | wc -l\n","!echo \"Training masks:\"\n","!ls -l /content/mia_panel_dataset/training/segmentation_mask | wc -l\n","# Verify subfolder structure 
(uncomment if needed for debugging)\n","# !ls /content/mia_panel_dataset/mia_panel_dataset/test\n","# !ls /content/mia_panel_dataset/mia_panel_dataset/training\n","\n","# Create necessary directories for model checkpoints\n","os.makedirs('checkpoints', exist_ok=True)\n","\n","# Define dataset paths (updated for nested mia_panel_dataset folder)\n","train_raw_path = '/content/mia_panel_dataset/training/raw'\n","train_mask_path = '/content/mia_panel_dataset/training/segmentation_mask'\n","test_raw_path = '/content/mia_panel_dataset/test/raw'\n","test_mask_path = '/content/mia_panel_dataset/test/segmentation_mask'\n","\n","# Define configuration\n","class Config:\n"," INPUT_SHAPE = (256, 256, 3) # Adjust based on your image size\n"," BATCH_SIZE = 2 # Reduced to handle small datasets\n"," EPOCHS = 300\n"," LEARNING_RATE = 1e-4\n"," MODEL_PATH = 'checkpoints/model.keras' # Updated to .keras format\n","\n","# Custom data loader\n","def load_image_and_mask(image_path, mask_path, target_size):\n"," image = tf.io.read_file(image_path)\n"," image = tf.image.decode_png(image, channels=3)\n"," image = tf.image.resize(image, target_size[:2])\n"," image = image / 255.0 # Normalize to [0, 1]\n","\n"," mask = tf.io.read_file(mask_path)\n"," mask = tf.image.decode_png(mask, channels=1)\n"," mask = tf.image.resize(mask, target_size[:2], method='nearest')\n"," mask = tf.cast(mask, tf.float32)\n"," # Normalize mask to [0, 1] for binary segmentation\n"," mask = mask / tf.reduce_max(mask) # Ensure mask values are [0, 1]\n"," mask = tf.where(mask > 0.5, 1.0, 0.0) # Binarize mask\n","\n"," return image, mask\n","\n","def create_dataset(raw_path, mask_path, batch_size, input_shape, is_train=True):\n"," image_files = sorted([os.path.join(raw_path, f) for f in os.listdir(raw_path) if f.endswith(('.png', '.jpg', '.jpeg'))])\n"," mask_files = sorted([os.path.join(mask_path, f) for f in os.listdir(mask_path) if f.endswith(('.png', '.jpg', '.jpeg'))])\n","\n"," # Ensure matching pairs\n"," 
print(f\"Found {len(image_files)} images and {len(mask_files)} masks\")\n"," assert len(image_files) == len(mask_files), \"Number of images and masks must match\"\n"," assert len(image_files) > 0, \"No images found in dataset\"\n","\n"," dataset = tf.data.Dataset.from_tensor_slices((image_files, mask_files))\n"," dataset = dataset.map(\n"," lambda x, y: load_image_and_mask(x, y, input_shape),\n"," num_parallel_calls=tf.data.AUTOTUNE\n"," )\n","\n"," if is_train:\n"," dataset = dataset.shuffle(buffer_size=1000)\n","\n"," dataset = dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)\n"," return dataset\n","\n","# Create datasets\n","train_dataset = create_dataset(\n"," train_raw_path, train_mask_path, Config.BATCH_SIZE, Config.INPUT_SHAPE, is_train=True\n",")\n","test_dataset = create_dataset(\n"," test_raw_path, test_mask_path, Config.BATCH_SIZE, Config.INPUT_SHAPE, is_train=False\n",")\n","\n","# Inspect a sample mask to verify format (optional debugging)\n","sample_image, sample_mask = next(iter(train_dataset))\n","print(f\"Sample mask shape: {sample_mask.shape}, min: {tf.reduce_min(sample_mask)}, max: {tf.reduce_max(sample_mask)}\")\n","\n","# Define the model (simplified U-Net inspired by DeepPanel's segmentation goal)\n","def build_model(input_shape):\n"," inputs = tf.keras.Input(shape=input_shape)\n","\n"," # Encoder\n"," c1 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(inputs)\n"," c1 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(c1)\n"," p1 = tf.keras.layers.MaxPooling2D()(c1)\n","\n"," c2 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(p1)\n"," c2 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(c2)\n"," p2 = tf.keras.layers.MaxPooling2D()(c2)\n","\n"," # Bottleneck\n"," b = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(p2)\n"," b = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(b)\n","\n"," # Decoder\n"," u1 = 
tf.keras.layers.Conv2DTranspose(128, 2, strides=2, padding='same')(b)\n"," u1 = tf.keras.layers.Concatenate()([u1, c2])\n"," c3 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(u1)\n"," c3 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(c3)\n","\n"," u2 = tf.keras.layers.Conv2DTranspose(64, 2, strides=2, padding='same')(c3)\n"," u2 = tf.keras.layers.Concatenate()([u2, c1])\n"," c4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(u2)\n"," c4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(c4)\n","\n"," outputs = tf.keras.layers.Conv2D(1, 1, activation='sigmoid')(c4) # Binary segmentation\n"," model = tf.keras.Model(inputs, outputs)\n"," return model\n","\n","# Build and compile model\n","model = build_model(Config.INPUT_SHAPE)\n","model.compile(\n"," optimizer=tf.keras.optimizers.Adam(learning_rate=Config.LEARNING_RATE),\n"," loss='binary_crossentropy', # Adjust if masks are multi-class\n"," metrics=['accuracy']\n",")\n","\n","# Train the model\n","history = model.fit(\n"," train_dataset,\n"," validation_data=test_dataset,\n"," epochs=Config.EPOCHS,\n"," callbacks=[\n"," tf.keras.callbacks.ModelCheckpoint(\n"," Config.MODEL_PATH, save_best_only=True, monitor='val_loss'\n"," )\n"," ]\n",")\n","\n","# Save the trained model to Google Drive\n","!cp checkpoints/model.keras /content/drive/MyDrive/deeppanel_model.keras"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Mp4 merge.ipynb","timestamp":1761494340884},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Mp4 merge.ipynb","timestamp":1761480798787},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1761335712919},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to 
WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
1
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"G9yAxL_ViF7y"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["Extract panels from images using trained model"],"metadata":{"id":"XJvrb0cuY3P7"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"lMJtrtME8IVE"},"outputs":[],"source":["# Cell 1: Unzip the new dataset\n","#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/hhhclusters.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}\n","\n","# Cell 2: Setup and panel extraction\n","# Install required dependencies\n","!pip install tensorflow\n","!pip install opencv-python-headless\n","!pip install numpy\n","!pip install tqdm\n","!pip install datasets # For Hugging Face dataset\n","\n","# Clone the DeepPanel repository (for any required utilities)\n","!git clone https://github.com/pedrovgs/DeepPanel.git\n","%cd DeepPanel\n","\n","# Import necessary libraries\n","import os\n","import cv2\n","import numpy as np\n","import tensorflow as tf\n","from tqdm import tqdm\n","from google.colab import drive\n","from datasets import Dataset\n","import pandas as pd\n","import zipfile\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Verify dataset structure\n","!ls /content/content\n","!ls /content/content/clusters\n","# Verify a sample cluster (uncomment if needed for debugging)\n","# !ls /content/content/clusters/cluster_1\n","\n","# Define paths\n","model_path = '/content/drive/MyDrive/deeppanel_model.keras' # Updated to .keras\n","clusters_path = '/content/content/clusters'\n","output_dir = '/content/drive/MyDrive/extracted_panels'\n","zip_output_path = '/content/drive/MyDrive/extracted_panels.zip'\n","\n","# Create output directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","# Load the trained model\n","model = tf.keras.models.load_model(model_path)\n","\n","# 
Function to preprocess image for model input\n","def preprocess_image(image_path, target_size=(256, 256)):\n"," image = cv2.imread(image_path)\n"," if image is None:\n"," raise ValueError(f\"Failed to load image: {image_path}\")\n"," image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n"," original_shape = image.shape[:2]\n"," image = cv2.resize(image, target_size)\n"," image = image / 255.0 # Normalize to [0, 1]\n"," return image, original_shape\n","\n","# Function to post-process mask and extract panels\n","def extract_panels(image_path, mask, original_shape, output_dir, cluster_name, image_idx):\n"," # Resize mask to original image size\n"," mask = cv2.resize(mask, (original_shape[1], original_shape[0]), interpolation=cv2.INTER_NEAREST)\n"," mask = (mask > 0.5).astype(np.uint8) * 255 # Threshold to binary mask\n","\n"," # Find contours\n"," contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n","\n"," # Load original image for cropping\n"," original_image = cv2.imread(image_path)\n"," if original_image is None:\n"," raise ValueError(f\"Failed to load original image: {image_path}\")\n","\n"," # Create output directory for this cluster\n"," cluster_output_dir = os.path.join(output_dir, cluster_name)\n"," os.makedirs(cluster_output_dir, exist_ok=True)\n","\n"," # Extract panels\n"," for i, contour in enumerate(contours):\n"," # Get bounding box for each contour\n"," x, y, w, h = cv2.boundingRect(contour)\n"," # Skip small contours (optional, adjust threshold as needed)\n"," if w * h < 1000: # Ignore small regions (e.g., noise)\n"," continue\n"," # Crop panel from original image\n"," panel = original_image[y:y+h, x:x+w]\n"," # Save panel\n"," panel_path = os.path.join(cluster_output_dir, f'panel_{image_idx}_{i}.jpg')\n"," cv2.imwrite(panel_path, panel)\n","\n","# Option 1: Process images directly (if dataset is small)\n","def process_images_directly(clusters_path, model, output_dir):\n"," cluster_folders = [f for f in 
os.listdir(clusters_path) if f.startswith('cluster_')]\n","\n"," for cluster_name in tqdm(cluster_folders, desc=\"Processing clusters\"):\n"," cluster_path = os.path.join(clusters_path, cluster_name)\n"," image_files = [f for f in os.listdir(cluster_path) if f.endswith(('.jpg', '.jpeg'))]\n","\n"," for idx, image_file in enumerate(tqdm(image_files, desc=f\"Processing {cluster_name}\")):\n"," image_path = os.path.join(cluster_path, image_file)\n"," try:\n"," # Preprocess image\n"," image, original_shape = preprocess_image(image_path)\n"," # Predict mask\n"," mask = model.predict(np.expand_dims(image, axis=0), verbose=0)[0]\n"," if mask.shape[-1] > 1: # Handle multi-class masks\n"," mask = np.argmax(mask, axis=-1) # Convert to single-channel\n"," else:\n"," mask = mask[..., 0] # Binary mask\n"," # Extract and save panels\n"," extract_panels(image_path, mask, original_shape, output_dir, cluster_name, idx)\n"," except Exception as e:\n"," print(f\"Error processing {image_path}: {e}\")\n","\n","# Option 2: Create Hugging Face dataset (for large datasets)\n","def create_hf_dataset(clusters_path):\n"," data = []\n"," cluster_folders = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')]\n","\n"," for cluster_name in cluster_folders:\n"," cluster_path = os.path.join(clusters_path, cluster_name)\n"," image_files = [f for f in os.listdir(cluster_path) if f.endswith(('.jpg', '.jpeg'))]\n"," for image_file in image_files:\n"," data.append({\n"," 'image_path': os.path.join(cluster_path, image_file),\n"," 'cluster': cluster_name\n"," })\n","\n"," df = pd.DataFrame(data)\n"," dataset = Dataset.from_pandas(df)\n"," return dataset\n","\n","# Choose processing method\n","use_hf_dataset = False # Set to True for large datasets\n","\n","if use_hf_dataset:\n"," # Process using Hugging Face dataset\n"," dataset = create_hf_dataset(clusters_path)\n"," for example in tqdm(dataset, desc=\"Processing images\"):\n"," image_path = example['image_path']\n"," cluster_name = 
example['cluster']\n"," try:\n"," image, original_shape = preprocess_image(image_path)\n"," mask = model.predict(np.expand_dims(image, axis=0), verbose=0)[0]\n"," if mask.shape[-1] > 1:\n"," mask = np.argmax(mask, axis=-1)\n"," else:\n"," mask = mask[..., 0]\n"," extract_panels(image_path, mask, original_shape, output_dir, cluster_name, example['index'])\n"," except Exception as e:\n"," print(f\"Error processing {image_path}: {e}\")\n","else:\n"," # Process directly\n"," process_images_directly(clusters_path, model, output_dir)\n","\n","# Cell 3: Zip and save extracted panels to Google Drive\n","def zip_directory(directory, zip_path):\n"," with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," for root, _, files in os.walk(directory):\n"," for file in files:\n"," file_path = os.path.join(root, file)\n"," arcname = os.path.relpath(file_path, directory)\n"," zipf.write(file_path, os.path.join('extracted_panels', arcname))\n","\n","# Create zip file\n","#zip_directory(output_dir, zip_output_path)\n","#print(f\"Extracted panels saved to {output_dir}\")\n","#print(f\"Zipped panels saved to {zip_output_path}\")\n","\n","# Optional: Debug a sample prediction\n","sample_cluster = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')][0]\n","sample_image = [f for f in os.listdir(os.path.join(clusters_path, sample_cluster)) if f.endswith(('.jpg', '.jpeg'))][0]\n","sample_image_path = os.path.join(clusters_path, sample_cluster, sample_image)\n","sample_image, original_shape = preprocess_image(sample_image_path)\n","sample_mask = model.predict(np.expand_dims(sample_image, axis=0), verbose=0)[0]\n","if sample_mask.shape[-1] > 1:\n"," sample_mask = np.argmax(sample_mask, axis=-1)\n","else:\n"," sample_mask = sample_mask[..., 0]\n","cv2.imwrite('/content/sample_pred_mask.png', sample_mask * 255)\n","print(\"Sample predicted mask saved to /content/sample_pred_mask.png\")"]},{"cell_type":"code","source":["import os\n","import zipfile\n","import 
random\n","from pathlib import Path\n","\n","# Define the root folder containing subfolders with images\n","root_folder = \"/content/drive/MyDrive/extracted_panels\"\n","\n","# Generate a random 5-digit number\n","random_number = f\"{random.randint(0, 99999):05d}\"\n","\n","# Define the output zip file path\n","zip_file_name = f\"/content/drive/MyDrive/training_data_{random_number}.zip\"\n","\n","# Collect all image files\n","image_files = []\n","for subfolder in Path(root_folder).glob(\"cluster_*\"):\n"," if subfolder.is_dir():\n"," for file_path in subfolder.glob(\"*\"):\n"," if file_path.is_file() and file_path.suffix.lower() in ('.jpg', '.jpeg', '.png', '.bmp', '.gif'):\n"," image_files.append(file_path)\n","\n","# Create a zip file\n","with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," # Add each image to the zip file with a sequential name\n"," for i, file_path in enumerate(image_files, 1):\n"," # Use the original extension\n"," ext = file_path.suffix.lower()\n"," # New filename: 1.jpg, 2.jpg, etc.\n"," new_filename = f\"{i}{ext}\"\n"," # Add the file to the zip with the new name\n"," zipf.write(file_path, arcname=new_filename)\n","\n","print(f\"Images zipped successfully into {zip_file_name} with {len(image_files)} images\")"],"metadata":{"id":"osL5jPF0hDDG"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Create numbered set for further clustering"],"metadata":{"id":"kFMxfm5RYvJt"}},{"cell_type":"code","source":["import os\n","import zipfile\n","import random\n","from pathlib import Path\n","\n","# Define the root folder containing subfolders with images\n","root_folder = \"/content/drive/MyDrive/extracted_panels\"\n","\n","# Generate a random 5-digit number\n","random_number = f\"{random.randint(0, 99999):05d}\"\n","\n","# Define the output zip file path\n","zip_file_name = f\"/content/drive/MyDrive/training_data_{random_number}.zip\"\n","\n","# Create a zip file\n","with zipfile.ZipFile(zip_file_name, 'w', 
zipfile.ZIP_DEFLATED) as zipf:\n"," # Walk through all subfolders in the root folder\n"," for subfolder in Path(root_folder).glob(\"cluster_*\"):\n"," if subfolder.is_dir():\n"," # Iterate through all files in the subfolder\n"," for file_path in subfolder.glob(\"*\"):\n"," if file_path.is_file() and file_path.suffix.lower() in ('.jpg', '.jpeg', '.png', '.bmp', '.gif'):\n"," # Add the image to the zip file, preserving the subfolder structure\n"," zipf.write(file_path, arcname=file_path.relative_to(root_folder))\n","\n","print(f\"Images zipped successfully into {zip_file_name}\")\n"],"metadata":{"id":"MR7C33klYD3E"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"UPzIJ402VVMd"},"source":["Train a Deep Panel Model\n","\n","Refer to https://github.com/pedrovgs/DeepPanel in how to build a dataset"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"JFtsOMxP6dJ7"},"outputs":[],"source":["# Install required dependencies\n","!pip install tensorflow\n","!pip install opencv-python-headless\n","!pip install numpy\n","!pip install tqdm\n","\n","# Clone the DeepPanel repository\n","!git clone https://github.com/pedrovgs/DeepPanel.git\n","%cd DeepPanel\n","\n","# Import necessary libraries\n","import os\n","import zipfile\n","from google.colab import drive\n","import tensorflow as tf\n","import cv2\n","import numpy as np\n","from tqdm import tqdm\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Unzip the dataset\n","dataset_path = '/content/drive/MyDrive/mia_panel_dataset.zip'\n","extract_path = '/content/mia_panel_dataset'\n","\n","with zipfile.ZipFile(dataset_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_path)\n","\n","# Remove __MACOSX folder if it exists\n","!rm -rf /content/mia_panel_dataset/__MACOSX\n","\n","# Verify dataset structure\n","!ls /content/mia_panel_dataset\n","#!ls /content/mia_panel_dataset\n","# Verify number of files in training folders\n","!echo \"Training raw images:\"\n","!ls -l 
/content/mia_panel_dataset/training/raw | wc -l\n","!echo \"Training masks:\"\n","!ls -l /content/mia_panel_dataset/training/segmentation_mask | wc -l\n","# Verify subfolder structure (uncomment if needed for debugging)\n","# !ls /content/mia_panel_dataset/mia_panel_dataset/test\n","# !ls /content/mia_panel_dataset/mia_panel_dataset/training\n","\n","# Create necessary directories for model checkpoints\n","os.makedirs('checkpoints', exist_ok=True)\n","\n","# Define dataset paths (updated for nested mia_panel_dataset folder)\n","train_raw_path = '/content/mia_panel_dataset/training/raw'\n","train_mask_path = '/content/mia_panel_dataset/training/segmentation_mask'\n","test_raw_path = '/content/mia_panel_dataset/test/raw'\n","test_mask_path = '/content/mia_panel_dataset/test/segmentation_mask'\n","\n","# Define configuration\n","class Config:\n"," INPUT_SHAPE = (256, 256, 3) # Adjust based on your image size\n"," BATCH_SIZE = 2 # Reduced to handle small datasets\n"," EPOCHS = 300\n"," LEARNING_RATE = 1e-4\n"," MODEL_PATH = 'checkpoints/model.keras' # Updated to .keras format\n","\n","# Custom data loader\n","def load_image_and_mask(image_path, mask_path, target_size):\n"," image = tf.io.read_file(image_path)\n"," image = tf.image.decode_png(image, channels=3)\n"," image = tf.image.resize(image, target_size[:2])\n"," image = image / 255.0 # Normalize to [0, 1]\n","\n"," mask = tf.io.read_file(mask_path)\n"," mask = tf.image.decode_png(mask, channels=1)\n"," mask = tf.image.resize(mask, target_size[:2], method='nearest')\n"," mask = tf.cast(mask, tf.float32)\n"," # Normalize mask to [0, 1] for binary segmentation\n"," mask = mask / tf.reduce_max(mask) # Ensure mask values are [0, 1]\n"," mask = tf.where(mask > 0.5, 1.0, 0.0) # Binarize mask\n","\n"," return image, mask\n","\n","def create_dataset(raw_path, mask_path, batch_size, input_shape, is_train=True):\n"," image_files = sorted([os.path.join(raw_path, f) for f in os.listdir(raw_path) if f.endswith(('.png', '.jpg', 
'.jpeg'))])\n","    mask_files = sorted([os.path.join(mask_path, f) for f in os.listdir(mask_path) if f.endswith(('.png', '.jpg', '.jpeg'))])\n","\n","    # Ensure matching pairs\n","    print(f\"Found {len(image_files)} images and {len(mask_files)} masks\")\n","    assert len(image_files) == len(mask_files), \"Number of images and masks must match\"\n","    assert len(image_files) > 0, \"No images found in dataset\"\n","\n","    # NOTE(review): pairing relies on sorted order only -- assumes image and\n","    # mask filenames sort identically; confirm against the dataset layout.\n","    dataset = tf.data.Dataset.from_tensor_slices((image_files, mask_files))\n","    dataset = dataset.map(\n","        lambda x, y: load_image_and_mask(x, y, input_shape),\n","        num_parallel_calls=tf.data.AUTOTUNE\n","    )\n","\n","    # Shuffle only for training so evaluation order stays deterministic.\n","    if is_train:\n","        dataset = dataset.shuffle(buffer_size=1000)\n","\n","    dataset = dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)\n","    return dataset\n","\n","# Create datasets\n","train_dataset = create_dataset(\n","    train_raw_path, train_mask_path, Config.BATCH_SIZE, Config.INPUT_SHAPE, is_train=True\n",")\n","test_dataset = create_dataset(\n","    test_raw_path, test_mask_path, Config.BATCH_SIZE, Config.INPUT_SHAPE, is_train=False\n",")\n","\n","# Inspect a sample mask to verify format (optional debugging)\n","sample_image, sample_mask = next(iter(train_dataset))\n","print(f\"Sample mask shape: {sample_mask.shape}, min: {tf.reduce_min(sample_mask)}, max: {tf.reduce_max(sample_mask)}\")\n","\n","# Define the model (simplified U-Net inspired by DeepPanel's segmentation goal)\n","def build_model(input_shape):\n","    \"\"\"Build a small U-Net: two-level encoder, bottleneck, mirrored decoder.\"\"\"\n","    inputs = tf.keras.Input(shape=input_shape)\n","\n","    # Encoder: two conv blocks, each followed by 2x2 max pooling.\n","    c1 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(inputs)\n","    c1 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(c1)\n","    p1 = tf.keras.layers.MaxPooling2D()(c1)\n","\n","    c2 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(p1)\n","    c2 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(c2)\n","    p2 = tf.keras.layers.MaxPooling2D()(c2)\n","\n","    # Bottleneck\n","    b = tf.keras.layers.Conv2D(256, 
3, padding='same', activation='relu')(p2)\n","    b = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(b)\n","\n","    # Decoder: upsample, then concatenate the matching encoder feature map\n","    # (skip connection) before each pair of convolutions.\n","    u1 = tf.keras.layers.Conv2DTranspose(128, 2, strides=2, padding='same')(b)\n","    u1 = tf.keras.layers.Concatenate()([u1, c2])\n","    c3 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(u1)\n","    c3 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(c3)\n","\n","    u2 = tf.keras.layers.Conv2DTranspose(64, 2, strides=2, padding='same')(c3)\n","    u2 = tf.keras.layers.Concatenate()([u2, c1])\n","    c4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(u2)\n","    c4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(c4)\n","\n","    outputs = tf.keras.layers.Conv2D(1, 1, activation='sigmoid')(c4)  # Binary segmentation\n","    model = tf.keras.Model(inputs, outputs)\n","    return model\n","\n","# Build and compile model\n","model = build_model(Config.INPUT_SHAPE)\n","model.compile(\n","    optimizer=tf.keras.optimizers.Adam(learning_rate=Config.LEARNING_RATE),\n","    loss='binary_crossentropy',  # Adjust if masks are multi-class\n","    metrics=['accuracy']\n",")\n","\n","# Train the model; ModelCheckpoint keeps only the weights with the best\n","# validation loss seen across all epochs.\n","history = model.fit(\n","    train_dataset,\n","    validation_data=test_dataset,\n","    epochs=Config.EPOCHS,\n","    callbacks=[\n","        tf.keras.callbacks.ModelCheckpoint(\n","            Config.MODEL_PATH, save_best_only=True, monitor='val_loss'\n","        )\n","    ]\n",")\n","\n","# Save the trained model to Google Drive\n","!cp checkpoints/model.keras /content/drive/MyDrive/deeppanel_model.keras"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Deep Panel Manga Extract.ipynb","timestamp":1761515783664},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Mp4 merge.ipynb","timestamp":1761494340884},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Mp4 
merge.ipynb","timestamp":1761480798787},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1761335712919},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Drive to WebP.ipynb","timestamp":1760993725927},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760450712160},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}