# GFPGAN / app.py — Gradio Space by lucky0146 (commit 18cbd80)
import gradio as gr
import cv2
import torch
from gfpgan import GFPGANer
import numpy as np
import os
# ---------------------------------------------------------------------------
# Model setup (runs once at import time).
# Loading the model outside the prediction function avoids reloading it on
# every call, which would be prohibitively slow.
# ---------------------------------------------------------------------------
MODEL_PATH = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth'

# Download the weights only if they are not already present.  On Hugging Face
# Spaces repo files persist between restarts; locally this avoids a
# re-download every time the script is run.  (Simple existence check only —
# no file-integrity verification.)
local_model_path = "GFPGANv1.3.pth"
if not os.path.exists(local_model_path):
    print(f"Downloading model to {local_model_path}...")
    torch.hub.download_url_to_file(MODEL_PATH, local_model_path, progress=True)
    print("Model download complete.")
else:
    print(f"Model {local_model_path} already exists.")

restorer = GFPGANer(
    model_path=local_model_path,  # use the local file downloaded above
    upscale=2,                    # 2x upscale of the restored output
    arch='clean',                 # 'clean' architecture matches the v1.3 weights
    channel_multiplier=2,
    bg_upsampler=None,            # set to a RealESRGAN upsampler to also enhance backgrounds
    device='cpu'
)
def gfpgan_restore_face(input_image_np):
    """Restore faces in an image using GFPGAN.

    Args:
        input_image_np: NumPy uint8 array of the input image as delivered by
            ``gr.Image(type="numpy")``, i.e. **RGB** channel order (or a 2-D
            grayscale array).

    Returns:
        NumPy array of the restored image in RGB order, suitable for the
        Gradio output component.  If no faces are detected, the original
        image is returned unchanged.

    Raises:
        gr.Error: if the input is missing or GFPGAN processing fails.
    """
    if input_image_np is None:
        raise gr.Error("Error: Could not read input image. Please upload a valid image.")

    # GFPGAN (OpenCV-based) expects BGR images, but Gradio's numpy images are
    # RGB — convert before processing so colours are not channel-swapped.
    # Grayscale (2-D) arrays are passed through untouched.
    is_color = input_image_np.ndim == 3 and input_image_np.shape[2] == 3
    bgr_input = cv2.cvtColor(input_image_np, cv2.COLOR_RGB2BGR) if is_color else input_image_np

    try:
        # enhance() returns (cropped_faces, restored_faces, restored_img);
        # restored_img is the full frame with restored faces pasted back.
        _, _, restored_img = restorer.enhance(
            bgr_input,
            has_aligned=False,
            only_center_face=False
        )
    except Exception as e:
        print(f"Error during GFPGAN processing: {e}")
        raise gr.Error(f"GFPGAN processing failed: {e}. Check server logs for details.")

    if restored_img is None:
        # No faces detected (or restoration produced nothing): fall back to
        # returning the input unchanged rather than erroring out.
        print("No faces were detected or restored by GFPGAN.")
        return input_image_np

    # Convert back to RGB for the Gradio output component.
    if restored_img.ndim == 3 and restored_img.shape[2] == 3:
        restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
    return restored_img
# Gradio UI definition: a single image-in / image-out interface around the
# restoration function above.
iface = gr.Interface(
    fn=gfpgan_restore_face,
    inputs=gr.Image(type="numpy", label="Upload Input Image"),
    outputs=gr.Image(type="numpy", label="Restored Output Image"),
    title="GFPGAN Face Restoration (CPU)",
    description="Upload an image with faces to restore them using GFPGAN. Runs on CPU, so it might be slow for large images.",
    allow_flagging="never"  # NOTE(review): deprecated in Gradio 4 in favour of flagging_mode — confirm installed version
)
if __name__ == '__main__':
    # Launch a local server when run directly.  On Hugging Face Spaces the
    # platform imports app.py and serves `iface` itself, so this guard only
    # matters for local testing.
    iface.launch()