Spaces:
Configuration error
Configuration error
Upload copy_of_welcome_to_colab.py
Browse files- copy_of_welcome_to_colab.py +915 -0
copy_of_welcome_to_colab.py
ADDED
|
@@ -0,0 +1,915 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""Copy of Welcome To Colab
|
| 3 |
+
|
| 4 |
+
Automatically generated by Colab.
|
| 5 |
+
|
| 6 |
+
Original file is located at
|
| 7 |
+
https://colab.research.google.com/drive/1N6-JcsHJ-9Fk2J2B3DPEQe8OmebXIavh
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
# Commented out IPython magic to ensure Python compatibility.
|
| 11 |
+
!mkdir -p models
|
| 12 |
+
!git clone https://github.com/jantic/DeOldify.git
|
| 13 |
+
# %cd DeOldify
|
| 14 |
+
!pip install -r requirements-colab.txt
|
| 15 |
+
import sys
|
| 16 |
+
sys.path.append('/content/DeOldify')
|
| 17 |
+
!pip install deoldify opencv-python imageio[ffmpeg] tqdm transformers torch torchvision pillow
|
| 18 |
+
!apt update && apt install ffmpeg -y # For video processing
|
| 19 |
+
|
| 20 |
+
# Example for Artistic model
|
| 21 |
+
!wget https://huggingface.co/databuzzword/deoldify-artistic/resolve/main/ColorizeArtistic_gen.pth -O models/ColorizeArtistic_gen.pth
|
| 22 |
+
|
| 23 |
+
# Example for Stable model
|
| 24 |
+
!wget https://huggingface.co/databuzzword/deoldify-stable/resolve/main/ColorizeStable_gen.pth -O models/ColorizeStable_gen.pth
|
| 25 |
+
|
| 26 |
+
# Create models folder if not exists
|
| 27 |
+
import os
|
| 28 |
+
os.makedirs("models", exist_ok=True)
|
| 29 |
+
|
| 30 |
+
# Download video model weights
|
| 31 |
+
!wget -O models/ColorizeVideo_gen.pth https://data.deepai.org/deoldify/ColorizeVideo_gen.pth
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# Commented out IPython magic to ensure Python compatibility.
|
| 36 |
+
# %%writefile /content/colorize_runner_fixed_optimized.py
|
| 37 |
+
# """
|
| 38 |
+
# colorize_runner_fixed_optimized.py
|
| 39 |
+
# A robust, patched, zero-surprise runner for DeOldify-based image & video colorization.
|
| 40 |
+
# OPTIMIZED VERSION: Added GPU acceleration, batch processing, frame skipping/interpolation, and resizing for 5-10x faster videos.
|
| 41 |
+
#
|
| 42 |
+
# How to use:
|
| 43 |
+
# Terminal:
|
| 44 |
+
# python colorize_runner_fixed_optimized.py --image bw.jpg --out colored.jpg
|
| 45 |
+
# python colorize_runner_fixed_optimized.py --video bw.mp4 --out colored.mp4 --max-frames 200 --batch-size 8 --skip-interval 2 --resize-factor 0.7
|
| 46 |
+
#
|
| 47 |
+
# From notebook (recommended in Colab):
|
| 48 |
+
# from colorize_runner_fixed_optimized import colorize_image, colorize_video, main_cli
|
| 49 |
+
# colorize_image("/content/bw.jpg", "/content/colored.jpg", render_factor=21)
|
| 50 |
+
# # Video: colorize_video("/content/bw.mp4", "/content/colored.mp4", batch_size=8, skip_interval=2)
|
| 51 |
+
# # or call main_cli with arg list (it strips notebook args):
|
| 52 |
+
# main_cli(["--video", "/content/bw.mp4", "--batch-size", "8"])
|
| 53 |
+
#
|
| 54 |
+
# Notes:
|
| 55 |
+
# - This script attempts to be tolerant of DeOldify fork differences (different function names & signatures).
|
| 56 |
+
# - It patches torch.load to allow older saved objects to unpickle (necessary for many DeOldify .pth files).
|
| 57 |
+
# - Security note: unpickling model files can execute code. Only use official/trusted weights.
|
| 58 |
+
# - Optimizations: GPU full usage, batching (up to 16 frames), skipping (process every Nth frame + interpolate), resizing (downscale for speed).
|
| 59 |
+
# - For Colab: Enable GPU runtime. Install: !pip install deoldify opencv-python imageio[ffmpeg] tqdm transformers torch torchvision
|
| 60 |
+
# - Clone DeOldify: !git clone https://github.com/jantic/DeOldify.git; import sys; sys.path.append('/content/DeOldify')
|
| 61 |
+
# """
|
| 62 |
+
#
|
| 63 |
+
# import os
|
| 64 |
+
# import sys
|
| 65 |
+
# import shutil
|
| 66 |
+
# import tempfile
|
| 67 |
+
# import math
|
| 68 |
+
# import inspect
|
| 69 |
+
# import mimetypes
|
| 70 |
+
# import imghdr
|
| 71 |
+
# import argparse # For CLI
|
| 72 |
+
# from pathlib import Path
|
| 73 |
+
# from typing import Optional, Tuple, Dict, List
|
| 74 |
+
# import torch
|
| 75 |
+
# import cv2
|
| 76 |
+
# import numpy as np
|
| 77 |
+
# from PIL import Image
|
| 78 |
+
# import time # For timing benchmarks
|
| 79 |
+
# import subprocess # For optional FFmpeg
|
| 80 |
+
# from tqdm import tqdm
|
| 81 |
+
# import imageio
|
| 82 |
+
#
|
| 83 |
+
# # Optional: transformers (BLIP) for captioning
|
| 84 |
+
# try:
|
| 85 |
+
# from transformers import BlipProcessor, BlipForConditionalGeneration
|
| 86 |
+
# HAS_BLIP = True
|
| 87 |
+
# except Exception:
|
| 88 |
+
# HAS_BLIP = False
|
| 89 |
+
#
|
| 90 |
+
# # -------------------------
|
| 91 |
+
# # GPU Setup (Global)
|
| 92 |
+
# # -------------------------
|
| 93 |
+
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 94 |
+
# print(f"Using device: {device}")
|
| 95 |
+
# if torch.cuda.is_available():
|
| 96 |
+
# print(f"GPU: {torch.cuda.get_device_name(0)}")
|
| 97 |
+
# print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
|
| 98 |
+
#
|
| 99 |
+
# # Function to move colorizer to GPU (call after loading)
|
| 100 |
+
# def move_colorizer_to_gpu(colorizer):
|
| 101 |
+
# if hasattr(colorizer, 'model') and colorizer.model is not None:
|
| 102 |
+
# colorizer.model = colorizer.model.to(device)
|
| 103 |
+
# # Handle if it's a nn.Module directly
|
| 104 |
+
# if isinstance(colorizer, torch.nn.Module):
|
| 105 |
+
# colorizer = colorizer.to(device)
|
| 106 |
+
# # Recurse for nested models (common in DeOldify)
|
| 107 |
+
# for attr_name in dir(colorizer):
|
| 108 |
+
# attr = getattr(colorizer, attr_name)
|
| 109 |
+
# if isinstance(attr, torch.nn.Module):
|
| 110 |
+
# setattr(colorizer, attr_name, attr.to(device))
|
| 111 |
+
# print("Colorizer moved to GPU.")
|
| 112 |
+
# return colorizer
|
| 113 |
+
#
|
| 114 |
+
# # -------------------------
|
| 115 |
+
# # PyTorch safety patch for older pickles (DeOldify weights)
|
| 116 |
+
# # -------------------------
|
| 117 |
+
# def _patch_torch_load_for_legacy_weights():
|
| 118 |
+
# """
|
| 119 |
+
# Patch torch.load to load legacy DeOldify checkpoints that contain objects
|
| 120 |
+
# disallowed by the new 'weights_only=True' default in PyTorch >=2.6.
|
| 121 |
+
#
|
| 122 |
+
# This patch forces weights_only=False when torch.load is called without an explicit
|
| 123 |
+
# weights_only argument. This is necessary to unpickle some older checkpoints.
|
| 124 |
+
# SECURITY: Only do this when you trust the checkpoint source (DeOldify official repo).
|
| 125 |
+
# """
|
| 126 |
+
# try:
|
| 127 |
+
# import torch
|
| 128 |
+
# import functools
|
| 129 |
+
# except Exception:
|
| 130 |
+
# return # torch not installed yet
|
| 131 |
+
#
|
| 132 |
+
# try:
|
| 133 |
+
# # allowlist common globals used by old checkpoints
|
| 134 |
+
# safe_list = [functools.partial, torch.nn.modules.batchnorm.BatchNorm2d]
|
| 135 |
+
# if hasattr(torch.serialization, "add_safe_globals"):
|
| 136 |
+
# try:
|
| 137 |
+
# torch.serialization.add_safe_globals(safe_list)
|
| 138 |
+
# except Exception:
|
| 139 |
+
# # ignore if unavailable
|
| 140 |
+
# pass
|
| 141 |
+
# except Exception:
|
| 142 |
+
# pass
|
| 143 |
+
#
|
| 144 |
+
# # Monkey-patch torch.load to set weights_only=False by default (only when not provided).
|
| 145 |
+
# try:
|
| 146 |
+
# old_load = torch.load
|
| 147 |
+
# def patched_load(*args, **kwargs):
|
| 148 |
+
# if "weights_only" not in kwargs:
|
| 149 |
+
# kwargs["weights_only"] = False
|
| 150 |
+
# return old_load(*args, **kwargs)
|
| 151 |
+
# torch.load = patched_load
|
| 152 |
+
# except Exception:
|
| 153 |
+
# pass
|
| 154 |
+
#
|
| 155 |
+
# # Apply patch immediately (harmless if torch isn't present)
|
| 156 |
+
# _patch_torch_load_for_legacy_weights()
|
| 157 |
+
#
|
| 158 |
+
# # -------------------------
|
| 159 |
+
# # Attempt flexible DeOldify import (support various forks/layouts)
|
| 160 |
+
# # -------------------------
|
| 161 |
+
# HAS_DEOLDIFY = False
|
| 162 |
+
# _get_image_colorizer_fn = None
|
| 163 |
+
#
|
| 164 |
+
# def _import_deoldify_helpers():
|
| 165 |
+
# """
|
| 166 |
+
# Attempt multiple import paths and capture get_image_colorizer.
|
| 167 |
+
# """
|
| 168 |
+
# global HAS_DEOLDIFY, _get_image_colorizer_fn
|
| 169 |
+
# if _get_image_colorizer_fn is not None:
|
| 170 |
+
# HAS_DEOLDIFY = True
|
| 171 |
+
# return
|
| 172 |
+
#
|
| 173 |
+
# tried = []
|
| 174 |
+
# candidates = [
|
| 175 |
+
# "deoldify.visualize", # typical
|
| 176 |
+
# "DeOldify.deoldify.visualize", # other layout if cloned inside package folder
|
| 177 |
+
# "deoldify", # fallback: maybe installed differently
|
| 178 |
+
# ]
|
| 179 |
+
# for modname in candidates:
|
| 180 |
+
# try:
|
| 181 |
+
# mod = __import__(modname, fromlist=["get_image_colorizer"])
|
| 182 |
+
# if hasattr(mod, "get_image_colorizer"):
|
| 183 |
+
# _get_image_colorizer_fn = getattr(mod, "get_image_colorizer")
|
| 184 |
+
# HAS_DEOLDIFY = True
|
| 185 |
+
# return
|
| 186 |
+
# # some forks might provide a different helper name; try to find anything called get_*coloriz*
|
| 187 |
+
# for name in dir(mod):
|
| 188 |
+
# if "color" in name and "get" in name:
|
| 189 |
+
# func = getattr(mod, name)
|
| 190 |
+
# if callable(func):
|
| 191 |
+
# _get_image_colorizer_fn = func
|
| 192 |
+
# HAS_DEOLDIFY = True
|
| 193 |
+
# return
|
| 194 |
+
# except Exception as e:
|
| 195 |
+
# tried.append((modname, str(e)))
|
| 196 |
+
# HAS_DEOLDIFY = False
|
| 197 |
+
# # no raise - we'll surface friendly error when user calls functions
|
| 198 |
+
#
|
| 199 |
+
# _import_deoldify_helpers()
|
| 200 |
+
#
|
| 201 |
+
# # -------------------------
|
| 202 |
+
# # BLIP caption utilities (optional)
|
| 203 |
+
# # -------------------------
|
| 204 |
+
# _blip_proc = None
|
| 205 |
+
# _blip_model = None
|
| 206 |
+
# def _init_blip(model_name: str="Salesforce/blip-image-captioning-base"):
|
| 207 |
+
# global _blip_proc, _blip_model, HAS_BLIP
|
| 208 |
+
# if not HAS_BLIP:
|
| 209 |
+
# return False
|
| 210 |
+
# if _blip_proc is None:
|
| 211 |
+
# _blip_proc = BlipProcessor.from_pretrained(model_name)
|
| 212 |
+
# if _blip_model is None:
|
| 213 |
+
# _blip_model = BlipForConditionalGeneration.from_pretrained(model_name).to(device)
|
| 214 |
+
# return True
|
| 215 |
+
#
|
| 216 |
+
# def generate_caption(image_path: str, max_length: int=40) -> Optional[str]:
|
| 217 |
+
# if not HAS_BLIP:
|
| 218 |
+
# return None
|
| 219 |
+
# _init_blip()
|
| 220 |
+
# img = Image.open(image_path).convert("RGB")
|
| 221 |
+
# inputs = _blip_proc(images=img, return_tensors="pt").to(device)
|
| 222 |
+
# with torch.no_grad():
|
| 223 |
+
# out = _blip_model.generate(**inputs, max_length=max_length, num_beams=4)
|
| 224 |
+
# caption = _blip_proc.tokenizer.decode(out[0], skip_special_tokens=True)
|
| 225 |
+
# return caption
|
| 226 |
+
#
|
| 227 |
+
# # -------------------------
|
| 228 |
+
# # Helper utilities
|
| 229 |
+
# # -------------------------
|
| 230 |
+
# def is_image(path: str) -> bool:
|
| 231 |
+
# if not os.path.exists(path): return False
|
| 232 |
+
# mt, _ = mimetypes.guess_type(path)
|
| 233 |
+
# if mt and mt.startswith("image"): return True
|
| 234 |
+
# try:
|
| 235 |
+
# if imghdr.what(path) is not None:
|
| 236 |
+
# return True
|
| 237 |
+
# except Exception:
|
| 238 |
+
# pass
|
| 239 |
+
# try:
|
| 240 |
+
# Image.open(path).verify()
|
| 241 |
+
# return True
|
| 242 |
+
# except Exception:
|
| 243 |
+
# return False
|
| 244 |
+
#
|
| 245 |
+
# def is_video(path: str) -> bool:
|
| 246 |
+
# if not os.path.exists(path): return False
|
| 247 |
+
# mt, _ = mimetypes.guess_type(path)
|
| 248 |
+
# if mt and mt.startswith("video"): return True
|
| 249 |
+
# try:
|
| 250 |
+
# cap = cv2.VideoCapture(path)
|
| 251 |
+
# ok, _ = cap.read()
|
| 252 |
+
# cap.release()
|
| 253 |
+
# return ok
|
| 254 |
+
# except Exception:
|
| 255 |
+
# return False
|
| 256 |
+
#
|
| 257 |
+
# def detect_media(path: str) -> Optional[str]:
|
| 258 |
+
# if is_image(path): return "image"
|
| 259 |
+
# if is_video(path): return "video"
|
| 260 |
+
# return None
|
| 261 |
+
#
|
| 262 |
+
# # -------------------------
|
| 263 |
+
# # DeOldify colorizer helper (robust)
|
| 264 |
+
# # -------------------------
|
| 265 |
+
# _colorizer_cache = {}
|
| 266 |
+
#
|
| 267 |
+
# def get_deoldify_colorizer(artistic: bool=True, *args, **kwargs):
|
| 268 |
+
# """
|
| 269 |
+
# Load and cache a DeOldify image colorizer object. Accepts various signatures.
|
| 270 |
+
# Returns the loaded colorizer object or raises a helpful RuntimeError.
|
| 271 |
+
# """
|
| 272 |
+
# if not HAS_DEOLDIFY or _get_image_colorizer_fn is None:
|
| 273 |
+
# raise RuntimeError(
|
| 274 |
+
# "DeOldify helper not found. Please clone the DeOldify repo and add it to PYTHONPATH "
|
| 275 |
+
# "(or install a compatible fork). Example:\n"
|
| 276 |
+
# " git clone https://github.com/jantic/DeOldify.git\n"
|
| 277 |
+
# " sys.path.append('/content/DeOldify')\n"
|
| 278 |
+
# )
|
| 279 |
+
#
|
| 280 |
+
# cache_key = ("deoldify_colorizer", artistic)
|
| 281 |
+
# if cache_key in _colorizer_cache:
|
| 282 |
+
# return _colorizer_cache[cache_key]
|
| 283 |
+
#
|
| 284 |
+
# # Try to call the function with different parameter names, defensively
|
| 285 |
+
# fn = _get_image_colorizer_fn
|
| 286 |
+
# signature = None
|
| 287 |
+
# try:
|
| 288 |
+
# signature = inspect.signature(fn)
|
| 289 |
+
# except Exception:
|
| 290 |
+
# pass
|
| 291 |
+
#
|
| 292 |
+
# # Build candidate kwargs based on signature
|
| 293 |
+
# call_kwargs = {}
|
| 294 |
+
# if signature:
|
| 295 |
+
# params = signature.parameters
|
| 296 |
+
# if "artistic" in params:
|
| 297 |
+
# call_kwargs["artistic"] = artistic
|
| 298 |
+
# elif "mode" in params:
|
| 299 |
+
# call_kwargs["mode"] = "artistic" if artistic else "stable"
|
| 300 |
+
# # some versions accept weights_path or weights_name; leave them out unless provided
|
| 301 |
+
# else:
|
| 302 |
+
# # unknown signature - just call with a single boolean
|
| 303 |
+
# try:
|
| 304 |
+
# colorizer = fn(artistic)
|
| 305 |
+
# colorizer = move_colorizer_to_gpu(colorizer)
|
| 306 |
+
# _colorizer_cache[cache_key] = colorizer
|
| 307 |
+
# return colorizer
|
| 308 |
+
# except Exception as e:
|
| 309 |
+
# raise RuntimeError("Could not call DeOldify helper: " + str(e))
|
| 310 |
+
#
|
| 311 |
+
# # attempt call
|
| 312 |
+
# try:
|
| 313 |
+
# colorizer = fn(**call_kwargs)
|
| 314 |
+
# except TypeError:
|
| 315 |
+
# # fallback - call with no args
|
| 316 |
+
# colorizer = fn()
|
| 317 |
+
# colorizer = move_colorizer_to_gpu(colorizer)
|
| 318 |
+
# _colorizer_cache[cache_key] = colorizer
|
| 319 |
+
# return colorizer
|
| 320 |
+
#
|
| 321 |
+
# def _find_colorize_method(colorizer):
|
| 322 |
+
# """
|
| 323 |
+
# Return a callable that colorizes an image path and returns either:
|
| 324 |
+
# - path to output file
|
| 325 |
+
# - PIL Image
|
| 326 |
+
# - numpy array
|
| 327 |
+
# We try common method names across forks.
|
| 328 |
+
# """
|
| 329 |
+
# candidates = [
|
| 330 |
+
# "colorize_from_path",
|
| 331 |
+
# "colorize_from_file",
|
| 332 |
+
# "colorize",
|
| 333 |
+
# "get_transformed_image",
|
| 334 |
+
# "get_colorized_image",
|
| 335 |
+
# "colorize_image"
|
| 336 |
+
# ]
|
| 337 |
+
# for name in candidates:
|
| 338 |
+
# if hasattr(colorizer, name):
|
| 339 |
+
# return getattr(colorizer, name)
|
| 340 |
+
# # Some colorizers return a method nested under `.colorizer` or similar
|
| 341 |
+
# for attr in dir(colorizer):
|
| 342 |
+
# if "colorize" in attr and callable(getattr(colorizer, attr)):
|
| 343 |
+
# return getattr(colorizer, attr)
|
| 344 |
+
# raise RuntimeError("Cannot find a colorize method in loaded DeOldify colorizer object. Inspect the object.")
|
| 345 |
+
#
|
| 346 |
+
# # -------------------------
|
| 347 |
+
# # Optimized Image colorization (Supports Batches)
|
| 348 |
+
# # -------------------------
|
| 349 |
+
# def colorize_image(input_paths_or_arrays, # str path, list of paths, or np.array/list of arrays
|
| 350 |
+
# output_paths_or_dir: str, # Single path, list, or dir to save
|
| 351 |
+
# render_factor: int = 35,
|
| 352 |
+
# produce_caption: bool = True,
|
| 353 |
+
# artistic: bool = True,
|
| 354 |
+
# batch_size: int = 8,
|
| 355 |
+
# resize_factor: float = 1.0) -> List[Dict]:
|
| 356 |
+
# """
|
| 357 |
+
# Colorize single image or batch. Returns list of {'output_path': str, 'caption': Optional[str]}
|
| 358 |
+
# """
|
| 359 |
+
# is_single = not isinstance(input_paths_or_arrays, (list, tuple))
|
| 360 |
+
# if is_single:
|
| 361 |
+
# inputs = [input_paths_or_arrays]
|
| 362 |
+
# if isinstance(output_paths_or_dir, str):
|
| 363 |
+
# outputs = [output_paths_or_dir] # Single output
|
| 364 |
+
# else:
|
| 365 |
+
# outputs = [output_paths_or_dir]
|
| 366 |
+
# else:
|
| 367 |
+
# inputs = input_paths_or_arrays
|
| 368 |
+
# if isinstance(output_paths_or_dir, str): # Dir mode
|
| 369 |
+
# os.makedirs(output_paths_or_dir, exist_ok=True)
|
| 370 |
+
# outputs = [os.path.join(output_paths_or_dir, f"colored_{i:06d}.png") for i in range(len(inputs))]
|
| 371 |
+
# else:
|
| 372 |
+
# outputs = output_paths_or_dir
|
| 373 |
+
#
|
| 374 |
+
# colorizer = get_deoldify_colorizer(artistic=artistic)
|
| 375 |
+
# colorize_fn = _find_colorize_method(colorizer)
|
| 376 |
+
#
|
| 377 |
+
# results = []
|
| 378 |
+
# start_time = time.time()
|
| 379 |
+
#
|
| 380 |
+
# # Process in batches
|
| 381 |
+
# for i in tqdm(range(0, len(inputs), batch_size), desc="Batching colorization"):
|
| 382 |
+
# batch_inputs = inputs[i:i + batch_size]
|
| 383 |
+
# batch_outputs = outputs[i:i + batch_size]
|
| 384 |
+
#
|
| 385 |
+
# batch_results = []
|
| 386 |
+
# for j, (inp, outp) in enumerate(zip(batch_inputs, batch_outputs)):
|
| 387 |
+
# # Load image if path
|
| 388 |
+
# if isinstance(inp, str):
|
| 389 |
+
# if not os.path.exists(inp):
|
| 390 |
+
# raise FileNotFoundError(f"Input not found: {inp}")
|
| 391 |
+
# img_array = cv2.imread(inp)
|
| 392 |
+
# img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
|
| 393 |
+
# else:
|
| 394 |
+
# img_array = inp if isinstance(inp, np.ndarray) else np.array(inp)
|
| 395 |
+
#
|
| 396 |
+
# # Resize for speed (optional)
|
| 397 |
+
# orig_shape = img_array.shape[:2]
|
| 398 |
+
# if resize_factor != 1.0:
|
| 399 |
+
# h, w = int(img_array.shape[0] * resize_factor), int(img_array.shape[1] * resize_factor)
|
| 400 |
+
# img_array = cv2.resize(img_array, (w, h))
|
| 401 |
+
#
|
| 402 |
+
# # Defensive colorize call
|
| 403 |
+
# res = None
|
| 404 |
+
# try_patterns = [
|
| 405 |
+
# {"path": inp, "render_factor": render_factor} if isinstance(inp, str) else None,
|
| 406 |
+
# {"image": img_array, "render_factor": render_factor},
|
| 407 |
+
# {"render_factor": render_factor},
|
| 408 |
+
# {}
|
| 409 |
+
# ]
|
| 410 |
+
# for kwargs in try_patterns:
|
| 411 |
+
# if kwargs is None: continue
|
| 412 |
+
# try:
|
| 413 |
+
# res = colorize_fn(**kwargs)
|
| 414 |
+
# break
|
| 415 |
+
# except TypeError:
|
| 416 |
+
# continue
|
| 417 |
+
#
|
| 418 |
+
# if res is None:
|
| 419 |
+
# try:
|
| 420 |
+
# res = colorize_fn(inp if isinstance(inp, str) else img_array)
|
| 421 |
+
# except Exception as e:
|
| 422 |
+
# raise RuntimeError(f"Colorize failed for batch item {j}: {e}")
|
| 423 |
+
#
|
| 424 |
+
# # Handle result
|
| 425 |
+
# final_out = None
|
| 426 |
+
# if isinstance(res, str) and os.path.exists(res):
|
| 427 |
+
# final_out = res
|
| 428 |
+
# shutil.copy(final_out, outp)
|
| 429 |
+
# elif isinstance(res, (tuple, list)) and len(res) > 0 and isinstance(res[0], str) and os.path.exists(res[0]):
|
| 430 |
+
# shutil.copy(res[0], outp)
|
| 431 |
+
# final_out = outp
|
| 432 |
+
# elif hasattr(res, "save"):
|
| 433 |
+
# res.save(outp)
|
| 434 |
+
# final_out = outp
|
| 435 |
+
# elif isinstance(res, np.ndarray):
|
| 436 |
+
# # Resize back if needed
|
| 437 |
+
# if resize_factor != 1.0:
|
| 438 |
+
# res = cv2.resize(res, orig_shape[::-1])
|
| 439 |
+
# Image.fromarray(res).save(outp)
|
| 440 |
+
# final_out = outp
|
| 441 |
+
# else:
|
| 442 |
+
# # Fallback copy/search (as in original)
|
| 443 |
+
# if isinstance(inp, str):
|
| 444 |
+
# shutil.copy(inp, outp)
|
| 445 |
+
# else:
|
| 446 |
+
# Image.fromarray(img_array).save(outp)
|
| 447 |
+
# final_out = outp
|
| 448 |
+
#
|
| 449 |
+
# # Caption if single image mode
|
| 450 |
+
# caption = None
|
| 451 |
+
# if produce_caption and HAS_BLIP and is_single:
|
| 452 |
+
# try:
|
| 453 |
+
# caption = generate_caption(final_out)
|
| 454 |
+
|
| 455 |
+
# Append missing code to complete the file (run this after the previous %%writefile)
|
| 456 |
+
with open('/content/colorize_runner_fixed_optimized.py', 'a') as f:
|
| 457 |
+
f.write('''
|
| 458 |
+
except Exception:
|
| 459 |
+
pass
|
| 460 |
+
|
| 461 |
+
batch_results.append({"output_path": final_out, "caption": caption})
|
| 462 |
+
|
| 463 |
+
results.extend(batch_results)
|
| 464 |
+
|
| 465 |
+
end_time = time.time()
|
| 466 |
+
print(f"Colorized {len(inputs)} item(s) in {end_time - start_time:.2f}s ({len(inputs)/(end_time - start_time):.1f} items/sec)")
|
| 467 |
+
|
| 468 |
+
return results[0] if is_single else results
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
# -------------------------
|
| 472 |
+
# Video pipeline (Optimized)
|
| 473 |
+
# -------------------------
|
| 474 |
+
def extract_frames(video_path: str, frames_dir: str, target_fps: Optional[int] = None, skip_interval: int = 1, use_ffmpeg: bool = False) -> Tuple[int, int]:
    """Extract frames from *video_path* into *frames_dir* as ``frame_%06d.png``.

    Args:
        video_path: Path to the source video.
        frames_dir: Output directory for frames (created if missing).
        target_fps: Reported fps override; defaults to the source fps.
        skip_interval: Keep only every Nth frame (N >= 1) to speed things up.
        use_ffmpeg: Use an external ffmpeg process instead of OpenCV decoding.

    Returns:
        ``(num_extracted_frames, fps)`` — fps is the rounded *source* rate
        (or ``target_fps``), not the reduced rate of the skipped sequence.

    Raises:
        RuntimeError: If OpenCV cannot open the video (OpenCV path).
        subprocess.CalledProcessError: If the ffmpeg invocation fails.
    """
    os.makedirs(frames_dir, exist_ok=True)
    interval = max(1, skip_interval)

    # Probe the source fps once, shared by both extraction paths
    # (previously duplicated in each branch).
    cap = cv2.VideoCapture(video_path)
    opened = cap.isOpened()
    orig_fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
    fps = int(round(orig_fps)) if target_fps is None else int(target_fps)

    if use_ffmpeg:
        # FFmpeg for faster extraction (install: !apt install ffmpeg in Colab)
        cap.release()
        # Emitting at fps/interval makes ffmpeg do the frame decimation for us.
        scale_fps = fps / interval
        cmd = [
            'ffmpeg', '-i', video_path,
            '-vf', f'fps={scale_fps}',
            '-y', f'{frames_dir}/frame_%06d.png',
        ]
        # check=True surfaces ffmpeg failures instead of silently yielding 0 frames.
        subprocess.run(cmd, capture_output=True, check=True)
        # Count outputs directly; no need to build and sort a file list.
        saved = sum(1 for f in os.listdir(frames_dir) if f.endswith('.png'))
        print(f"FFmpeg extracted {saved} frames (effective skip: {skip_interval})")
        return saved, fps

    # OpenCV path: decode every frame, write every `interval`-th one.
    if not opened:
        cap.release()
        raise RuntimeError(f"Cannot open video {video_path}")
    idx = 0
    saved = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if idx % interval == 0:
            fname = os.path.join(frames_dir, f"frame_{saved:06d}.png")
            cv2.imwrite(fname, frame)
            saved += 1
        idx += 1
    cap.release()
    print(f"OpenCV extracted {saved} frames (skipped every {interval-1})")
    return saved, fps
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def interpolate_skipped_frames(color_dir: str, orig_num_frames: int, skip_interval: int = 1) -> None:
    """Rebuild a full frame sequence after skipped-frame colorization.

    Reads the processed key frames (``frame_000000.png`` ...) from *color_dir*
    and writes out ``orig_num_frames`` frames, filling the gaps between key
    frames with a simple linear (alpha) blend. For higher quality, optical
    flow (e.g. OpenCV's DISOpticalFlow) would be the next step up.
    """
    if skip_interval <= 1:
        return  # nothing was skipped, sequence is already complete

    key_names = sorted(
        name for name in os.listdir(color_dir)
        if name.startswith('frame_') and name.endswith('.png')
    )
    if not key_names:
        return

    # Load every processed key frame into memory once.
    key_frames = [cv2.imread(os.path.join(color_dir, name)) for name in key_names]
    last_key = len(key_frames) - 1

    rebuilt = []
    for frame_idx in range(orig_num_frames):
        base = min(frame_idx // skip_interval, last_key)
        step = frame_idx % skip_interval
        if base < last_key and step != 0:
            # In-between frame: blend toward the next key frame.
            weight = step / skip_interval
            rebuilt.append(
                cv2.addWeighted(key_frames[base], 1 - weight, key_frames[base + 1], weight, 0)
            )
        else:
            # Exact key frame position (or past the last key): hold it.
            rebuilt.append(key_frames[base])

    # Overwrite the directory contents with the full-length sequence.
    for frame_idx, frame in enumerate(rebuilt):
        cv2.imwrite(os.path.join(color_dir, f"frame_{frame_idx:06d}.png"), frame)
    print(f"Interpolated to {orig_num_frames} full frames.")
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
def reassemble_video(frames_dir: str, output_path: str, fps: int = 25) -> None:
    """Encode the colored ``frame_*.png`` files in *frames_dir* into a video.

    Uses imageio with the libx264 codec (FFmpeg backend when installed).
    Raises RuntimeError if the directory holds no matching frames.
    """
    frame_paths = sorted(
        os.path.join(frames_dir, name)
        for name in os.listdir(frames_dir)
        if name.startswith('frame_') and name.endswith('.png')
    )
    if not frame_paths:
        raise RuntimeError("No frames found to reassemble.")

    # Use imageio for simplicity (FFmpeg backend if installed)
    with imageio.get_writer(output_path, fps=fps, codec='libx264') as writer:
        for path in tqdm(frame_paths, desc="Reassembling video"):
            writer.append_data(imageio.imread(path))

    print(f"Video saved to {output_path}")
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
def colorize_video(input_path: str,
                   output_path: str,
                   max_frames: Optional[int] = None,
                   batch_size: int = 8,
                   skip_interval: int = 1,
                   resize_factor: float = 1.0,
                   artistic: bool = True,
                   render_factor: int = 35,
                   use_ffmpeg: bool = True,
                   target_fps: Optional[int] = None) -> Dict:
    """
    Full optimized video colorization pipeline.

    Pipeline: extract frames (optionally skipping every Nth) -> batch
    colorize -> interpolate skipped frames -> reassemble into a video.

    Args:
        input_path: Path to the source video.
        output_path: Where the colorized video is written.
        max_frames: Optional hard cap on how many original frames are used.
        batch_size: Batch size forwarded to the colorizer.
        skip_interval: Colorize every Nth frame (1 = every frame).
        resize_factor: Spatial downscale for speed (1.0 = original size).
        artistic: Use the artistic DeOldify model.
        render_factor: DeOldify render factor (quality/speed trade-off).
        use_ffmpeg: Prefer FFmpeg for frame extraction.
        target_fps: Optional FPS override during extraction.

    Returns:
        {'output_path': str, 'processed_frames': int, 'total_time': float}

    Raises:
        ValueError: if input_path is not a valid video.
        RuntimeError: if the video cannot be opened.
    """
    if not is_video(input_path):
        raise ValueError(f"Input {input_path} is not a valid video.")

    start_time = time.time()
    with tempfile.TemporaryDirectory() as temp_dir:
        frames_dir = os.path.join(temp_dir, "frames")
        color_dir = os.path.join(temp_dir, "colored")

        # Step 1: Extract frames (with skipping)
        cap = cv2.VideoCapture(input_path)
        if not cap.isOpened():
            # BUGFIX: previously an unopenable file silently yielded a
            # frame count of 0 instead of a clear error.
            cap.release()
            raise RuntimeError(f"Could not open video: {input_path}")
        orig_num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        num_extracted, fps = extract_frames(input_path, frames_dir, target_fps, skip_interval, use_ffmpeg)

        # BUGFIX: honor max_frames — it was previously computed into an
        # unused local and silently ignored. Cap the reconstructed length
        # and delete surplus extracted frames before colorization.
        if max_frames is not None and max_frames < orig_num_frames:
            orig_num_frames = max_frames
            keep = (orig_num_frames + skip_interval - 1) // skip_interval
            extracted = sorted(os.path.join(frames_dir, f)
                               for f in os.listdir(frames_dir) if f.endswith('.png'))
            for surplus in extracted[keep:]:
                os.remove(surplus)
            num_extracted = min(num_extracted, keep)

        # Step 2: Colorize extracted frames (batch)
        colorize_image(frames_dir, color_dir, render_factor=render_factor, artistic=artistic,
                       batch_size=batch_size, resize_factor=resize_factor, produce_caption=False)

        # Step 3: Interpolate skipped frames
        interpolate_skipped_frames(color_dir, orig_num_frames, skip_interval)

        # Step 4: Reassemble video
        reassemble_video(color_dir, output_path, fps)

    total_time = time.time() - start_time
    print(f"Video colorized in {total_time:.2f}s ({num_extracted} frames processed, {orig_num_frames} total)")
    return {"output_path": output_path, "processed_frames": num_extracted, "total_time": total_time}
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
# -------------------------
|
| 627 |
+
# CLI Interface
|
| 628 |
+
# -------------------------
|
| 629 |
+
def main_cli(args: Optional[List[str]] = None):
    """
    CLI entrypoint. Call with sys.argv or an explicit argument list.

    Exactly one of --image / --video selects the pipeline; with neither,
    the help text is printed.

    Args:
        args: Argument list (defaults to sys.argv[1:]).
    """
    parser = argparse.ArgumentParser(description="DeOldify Colorization Runner")
    parser.add_argument("--image", type=str, help="Input image path")
    parser.add_argument("--video", type=str, help="Input video path")
    parser.add_argument("--out", "-o", type=str, required=True, help="Output path")
    parser.add_argument("--render-factor", type=int, default=35, help="Render factor (21-40)")
    # BUGFIX: store_true combined with default=True made artistic mode
    # impossible to turn off. --artistic is kept (as a no-op) for backward
    # compatibility; --no-artistic now disables it.
    parser.add_argument("--artistic", action="store_true", default=True, help="Use artistic mode")
    parser.add_argument("--no-artistic", dest="artistic", action="store_false",
                        help="Use the stable (non-artistic) model")
    parser.add_argument("--batch-size", type=int, default=8, help="Batch size for processing")
    parser.add_argument("--skip-interval", type=int, default=1, help="Frame skip interval (1=full)")
    parser.add_argument("--resize-factor", type=float, default=1.0, help="Resize factor for speed (0.5=half size)")
    parser.add_argument("--max-frames", type=int, default=None, help="Max frames to process (videos)")

    if args is None:
        args = sys.argv[1:]
    opts = parser.parse_args(args)

    if opts.image:
        result = colorize_image(opts.image, opts.out, render_factor=opts.render_factor,
                                artistic=opts.artistic, batch_size=opts.batch_size,
                                resize_factor=opts.resize_factor)
        print(f"Colored image: {result['output_path']}")
    elif opts.video:
        result = colorize_video(opts.video, opts.out, max_frames=opts.max_frames,
                                batch_size=opts.batch_size, skip_interval=opts.skip_interval,
                                resize_factor=opts.resize_factor, artistic=opts.artistic,
                                render_factor=opts.render_factor)
        print(f"Colored video: {result['output_path']}")
    else:
        parser.print_help()


if __name__ == "__main__":
    main_cli()
|
| 665 |
+
''')

print("File completed and fixed!")

# Smoke-test the module that was just written to disk by importing its
# public helpers; fails fast here if the generated file is broken.
from colorize_runner_fixed_optimized import colorize_image, detect_media, is_image
print("Import successful!")
|
| 671 |
+
|
| 672 |
+
# --- 🔹 IMAGE COLORIZATION CELL (with Upload + Download + Control Buttons) 🔹 ---
|
| 673 |
+
from datetime import datetime
|
| 674 |
+
from IPython.display import display, clear_output
|
| 675 |
+
import cv2, os, time
|
| 676 |
+
from google.colab import files
|
| 677 |
+
import ipywidgets as widgets
|
| 678 |
+
|
| 679 |
+
def run_image_colorization(input_path, render_factor=35, resize_factor=1.0):
    """
    Enhanced DeOldify Image Colorizer
    ---------------------------------
    ✅ Upload support
    ✅ Auto grayscale detection
    ✅ Before/After preview
    ✅ Download button (Colab-native)
    ✅ Rerun & Clear helpers

    Args:
        input_path: Path of the image to colorize.
        render_factor: DeOldify render factor (quality/speed trade-off).
        resize_factor: Spatial scale applied before colorizing.

    Returns:
        Path of the colorized output image.

    Raises:
        FileNotFoundError: if input_path does not exist.
        ValueError: if the path is not a readable image.
    """
    from colorize_runner_fixed_optimized import colorize_image, detect_media, is_image

    if not os.path.exists(input_path):
        raise FileNotFoundError(f"File not found: {input_path}")
    if not is_image(input_path):
        raise ValueError("Provided path is not a valid image.")

    # --- Detect grayscale ---
    img = cv2.imread(input_path)
    if img is None:
        # BUGFIX: cv2.imread returns None for unreadable files; the old code
        # then crashed with an opaque AttributeError on img.shape.
        raise ValueError(f"Could not read image: {input_path}")
    # A grayscale image stored as 3-channel BGR has all channels equal.
    # (The previous check compared a luma conversion against the blue
    # channel only, which could misclassify tinted images.)
    gray_check = (
        img.ndim < 3 or img.shape[2] == 1
        or ((img[:, :, 0] == img[:, :, 1]).all() and (img[:, :, 1] == img[:, :, 2]).all())
    )
    if not gray_check:
        print("⚠️ Image appears already colored — still running for enhancement.")

    # --- Output path (timestamped so repeat runs never overwrite) ---
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_path = f"/content/colorized_{timestamp}.jpg"

    # --- Colorize ---
    print("🎨 Starting colorization...")
    start_time = time.time()
    result = colorize_image(input_path, output_path, render_factor=render_factor, resize_factor=resize_factor)
    end_time = time.time()
    print(f"✅ Done in {end_time - start_time:.2f}s — saved at {output_path}")

    # --- Before/After display ---
    before = cv2.cvtColor(cv2.imread(input_path), cv2.COLOR_BGR2RGB)
    after = cv2.cvtColor(cv2.imread(result['output_path']), cv2.COLOR_BGR2RGB)

    import matplotlib.pyplot as plt
    plt.figure(figsize=(14, 6))
    plt.subplot(1, 2, 1); plt.imshow(before); plt.title("Before"); plt.axis("off")
    plt.subplot(1, 2, 2); plt.imshow(after); plt.title("After"); plt.axis("off")
    plt.show()

    # --- Caption (optional) ---
    if result.get('caption'):
        print(f"🧠 Caption: {result['caption']}")

    # --- Buttons ---
    download_btn = widgets.Button(description="⬇️ Download Image", button_style='success', icon='download')
    rerun_btn = widgets.Button(description="🔁 Re-run", button_style='info', icon='refresh')
    clear_btn = widgets.Button(description="🧹 Clear", button_style='warning', icon='trash')

    def on_download(b): files.download(output_path)
    def on_clear(b): clear_output(); print("🧹 Output cleared.")
    # BUGFIX: re-run previously reverted to default render/resize settings.
    def on_rerun(b): clear_output(); print("🔁 Re-running..."); run_image_colorization(input_path, render_factor, resize_factor)

    download_btn.on_click(on_download)
    clear_btn.on_click(on_clear)
    rerun_btn.on_click(on_rerun)
    display(widgets.HBox([download_btn, rerun_btn, clear_btn]))

    return result['output_path']
|
| 745 |
+
|
| 746 |
+
# --- Upload section ---
uploader = widgets.FileUpload(accept='image/*', multiple=False)
display(widgets.HTML("<h3>📤 Upload an Image for Colorization</h3>"))
display(uploader)

def handle_upload(change):
    """Persist each uploaded image under /content, then colorize it."""
    if not uploader.value:
        return
    for filename, payload in uploader.value.items():
        dest = f"/content/{filename}"
        with open(dest, 'wb') as fh:
            fh.write(payload['content'])
        print(f"✅ Uploaded: {dest}")
        run_image_colorization(dest)

uploader.observe(handle_upload, names='value')
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
|
| 764 |
+
# --- 🔹 VIDEO COLORIZATION CELL (with Upload + Download + Controls) 🔹 ---
|
| 765 |
+
import os, time
|
| 766 |
+
from IPython.display import display, clear_output
|
| 767 |
+
from google.colab import files
|
| 768 |
+
import ipywidgets as widgets
|
| 769 |
+
|
| 770 |
+
def run_video_colorization(input_path):
    """
    DeOldify Video Colorizer with UI
    --------------------------------
    ✅ Upload video support
    ✅ Automatic downscale → colorize → upscale
    ✅ Download button
    ✅ Clear & Rerun helpers

    Args:
        input_path: Path of the video to colorize.

    Raises:
        FileNotFoundError: if input_path does not exist.
        RuntimeError: if an ffmpeg step fails.
    """
    if not os.path.exists(input_path):
        raise FileNotFoundError(f"File not found: {input_path}")

    lowres_video = "/content/video_lowres.mp4"
    colorized_lowres = "/content/sample_color_lowres.mp4"
    final_upscaled = "/content/sample_color_final.mp4"

    print("🎬 Starting video colorization...")

    # --- Step 2: Downscale ---
    # Uses os.system instead of an IPython "!" magic so this function is
    # valid plain Python, consistent with the Gradio wrappers below, and
    # so failures can be detected via the return code.
    print("⬇️ Downscaling for faster processing...")
    if os.system(f'ffmpeg -y -i "{input_path}" -vf scale=640:-1 -r 15 "{lowres_video}"') != 0:
        raise RuntimeError("ffmpeg downscale step failed")

    # --- Step 3: Colorize ---
    print("🎨 Running DeOldify colorization...")
    start_time = time.time()
    main_cli(["--video", lowres_video, "--out", colorized_lowres])
    end_time = time.time()
    print(f"✅ Colorization done in {end_time - start_time:.2f}s.")

    # --- Step 4: Upscale ---
    print("⬆️ Upscaling to 1080p 24fps...")
    if os.system(f'ffmpeg -y -i "{colorized_lowres}" -vf scale=1920:1080 -r 24 "{final_upscaled}"') != 0:
        raise RuntimeError("ffmpeg upscale step failed")
    print(f"✅ Final video saved at: {final_upscaled}")

    # --- Buttons ---
    download_btn = widgets.Button(description="⬇️ Download Video", button_style='success', icon='download')
    rerun_btn = widgets.Button(description="🔁 Re-run", button_style='info', icon='refresh')
    clear_btn = widgets.Button(description="🧹 Clear", button_style='warning', icon='trash')

    def on_download(b): files.download(final_upscaled)
    def on_clear(b): clear_output(); print("🧹 Output cleared.")
    def on_rerun(b): clear_output(); print("🔁 Re-running..."); run_video_colorization(input_path)

    download_btn.on_click(on_download)
    clear_btn.on_click(on_clear)
    rerun_btn.on_click(on_rerun)
    display(widgets.HBox([download_btn, rerun_btn, clear_btn]))
|
| 814 |
+
|
| 815 |
+
# --- Upload section ---
video_uploader = widgets.FileUpload(accept='video/*', multiple=False)
display(widgets.HTML("<h3>📤 Upload a Video for Colorization</h3>"))
display(video_uploader)

def handle_video_upload(change):
    """Persist each uploaded video under /content, then colorize it."""
    if not video_uploader.value:
        return
    for filename, payload in video_uploader.value.items():
        dest = f"/content/{filename}"
        with open(dest, 'wb') as fh:
            fh.write(payload['content'])
        print(f"✅ Uploaded: {dest}")
        run_video_colorization(dest)

video_uploader.observe(handle_video_upload, names='value')
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
!pip install gradio
|
| 834 |
+
|
| 835 |
+
# --- 🔹 AI COLORIZATION WEB APP (Gradio Interface) 🔹 ---
|
| 836 |
+
import gradio as gr
|
| 837 |
+
import os
|
| 838 |
+
import time
|
| 839 |
+
import cv2
|
| 840 |
+
|
| 841 |
+
from colorize_runner_fixed_optimized import colorize_image
|
| 842 |
+
# main_cli should already be imported from your existing code
|
| 843 |
+
|
| 844 |
+
# --- Image Colorization Wrapper for Gradio ---
|
| 845 |
+
def colorize_image_app(image):
    """
    Gradio wrapper for image colorization.

    Args:
        image: Filepath of the uploaded image (gr.Image type="filepath"),
            or None when nothing was uploaded.

    Returns:
        (output_path_or_None, status_message) for the two Gradio outputs.
    """
    if image is None:
        return None, "⚠️ Please upload an image first."

    output_path = "/content/colorized_image_gradio.jpg"
    try:
        start_time = time.time()
        result = colorize_image(image, output_path)
        end_time = time.time()
        msg = f"✅ Image colorized successfully in {end_time - start_time:.2f}s!"
        # BUGFIX: the runner's result was computed but ignored; prefer the
        # path it actually reports, falling back to the requested path.
        return result.get('output_path', output_path), msg
    except Exception as e:
        # UI boundary: surface the error to the user instead of crashing the app.
        return None, f"❌ Error: {str(e)}"
|
| 861 |
+
|
| 862 |
+
# --- Video Colorization Wrapper for Gradio ---
|
| 863 |
+
def colorize_video_app(video):
    """
    Gradio wrapper for video colorization.

    Args:
        video: Filepath of the uploaded video, or None when nothing was uploaded.

    Returns:
        (output_path_or_None, status_message) for the two Gradio outputs.
    """
    if video is None:
        return None, "⚠️ Please upload a video first."

    input_video = video
    lowres_video = "/content/video_lowres_gradio.mp4"
    colorized_lowres = "/content/sample_color_lowres_gradio.mp4"
    final_upscaled = "/content/sample_color_final_gradio.mp4"

    try:
        print("⬇️ Downscaling video for faster processing...")
        # BUGFIX: os.system return codes were ignored, so an ffmpeg failure
        # silently produced a missing/broken video that was still "returned".
        if os.system(f'ffmpeg -y -i "{input_video}" -vf scale=640:-1 -r 15 "{lowres_video}"') != 0:
            raise RuntimeError("ffmpeg downscale step failed")

        print("🎨 Running DeOldify colorization...")
        start_time = time.time()
        main_cli(["--video", lowres_video, "--out", colorized_lowres])
        end_time = time.time()
        print(f"✅ Done in {end_time - start_time:.2f}s.")

        print("⬆️ Upscaling to 1080p 24fps...")
        if os.system(f'ffmpeg -y -i "{colorized_lowres}" -vf scale=1920:1080 -r 24 "{final_upscaled}"') != 0:
            raise RuntimeError("ffmpeg upscale step failed")

        msg = f"✅ Video colorized successfully in {end_time - start_time:.2f}s!"
        return final_upscaled, msg
    except Exception as e:
        # UI boundary: surface the error to the user instead of crashing the app.
        return None, f"❌ Error: {str(e)}"
|
| 892 |
+
|
| 893 |
+
# --- BUILD GRADIO INTERFACE ---
with gr.Blocks() as demo:
    gr.Markdown("""
    # 🎨 AI-Based Image & Video Colorization
    Upload grayscale media and watch it come to life with color!
    """)

    # Image tab: filepath in, colorized image + status text out.
    with gr.Tab("🖼️ Image Colorization"):
        img_input = gr.Image(type="filepath", label="Upload Image")
        img_output = gr.Image(label="Colorized Output")
        img_status = gr.Textbox(label="Status", interactive=False)
        img_btn = gr.Button("🎨 Colorize Image")
        img_btn.click(colorize_image_app, inputs=img_input, outputs=[img_output, img_status])

    # Video tab: same layout, video widgets.
    with gr.Tab("🎬 Video Colorization"):
        vid_input = gr.Video(label="Upload Video")
        vid_output = gr.Video(label="Colorized Output")
        vid_status = gr.Textbox(label="Status", interactive=False)
        vid_btn = gr.Button("🎨 Colorize Video")
        vid_btn.click(colorize_video_app, inputs=vid_input, outputs=[vid_output, vid_status])

    gr.Markdown("Developed by [Your Name] — Final Year Project 2025 🎓")

# --- LAUNCH APP ---
demo.launch(share=True)
|