| | import gradio as gr |
| | import torch |
| | from torchvision import transforms |
| | from PIL import Image |
| |
|
| | |
# UI display label (shown in the radio buttons) -> TorchScript checkpoint file.
model_paths = {
    "All colors": "unet_generator.pt",
    "20 colors only": "20color_generator.pt",
}
| |
|
| | |
| | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
| |
|
| | |
# Preprocessing applied to every grayscale input before inference:
# resize to the generator's expected 512x512 resolution, then convert the
# PIL image to a float tensor scaled to [0, 1].
_preprocess_steps = [
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
]
transform = transforms.Compose(_preprocess_steps)
| |
|
| | |
# Cache of already-loaded TorchScript models keyed by checkpoint path, so each
# model file is deserialized from disk at most once per process instead of on
# every inference request (load_model is called from the request handler).
_model_cache = {}


def load_model(path):
    """Load a TorchScript model from *path* onto the module-level ``device``.

    The loaded module is memoized in ``_model_cache``: repeated calls with the
    same path return the already-loaded module instead of re-reading the file.
    The model is switched to eval mode before being cached/returned.

    Parameters
    ----------
    path : str
        Filesystem path of the ``torch.jit``-serialized checkpoint.

    Returns
    -------
    torch.jit.ScriptModule
        The deserialized model in eval mode, on ``device``.
    """
    model = _model_cache.get(path)
    if model is None:
        model = torch.jit.load(path, map_location=device)
        model.eval()
        _model_cache[path] = model
    return model
| |
|
| | |
def colorize(image, selected_model):
    """Produce a grayscale preview and a model-colorized version of *image*.

    Parameters
    ----------
    image : PIL.Image.Image
        The uploaded input image (any mode; converted to luminance here).
    selected_model : str
        Key into the module-level ``model_paths`` mapping that selects which
        generator checkpoint to run.

    Returns
    -------
    tuple[PIL.Image.Image, PIL.Image.Image]
        ``(grayscale, colorized)`` images, in that order, matching the two
        Gradio output slots.
    """
    # Drop the color information first; the generator only sees luminance.
    grayscale = image.convert("L")

    # (1, C, H, W) batch on the inference device, resized by `transform`.
    batch = transform(grayscale).unsqueeze(0).to(device)

    generator = load_model(model_paths[selected_model])

    # Inference only - no autograd bookkeeping needed.
    with torch.no_grad():
        prediction = generator(batch)

    # CHW float tensor in [0, 1] -> HWC uint8 array -> PIL image.
    hwc = prediction.squeeze(0).permute(1, 2, 0).clamp(0, 1).cpu().numpy()
    colorized = Image.fromarray((hwc * 255).astype('uint8'))

    return grayscale, colorized
| |
|
| | |
# Build and launch the web UI. The single callback returns two images that
# fill the two output slots: the grayscale intermediate and the colorized
# result from the selected model.
demo = gr.Interface(
    fn=colorize,
    inputs=[
        gr.Image(type="pil", label="Input Image"),
        gr.Radio(choices=["All colors", "20 colors only"], label="Model"),
    ],
    outputs=[
        gr.Image(type="pil", label="Grayscale Image"),
        gr.Image(type="pil", label="Colorized Image"),
    ],
    title="Image Colorization",
    description=(
        "Upload a color image and choose a model to see it colorized from a grayscale version. "
        "The system first converts the input image to black and white, then uses a trained deep learning model "
        "to generate a colorized version. You can experiment with two models: one trained on a full color palette "
        "and another limited to just 20 colors."
    ),
)
demo.launch()
| |
|