File size: 3,511 Bytes
18cbd80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import gradio as gr
import cv2
import torch
from gfpgan import GFPGANer
import numpy as np
import os

# --- One-time model setup --------------------------------------------------
# The GFPGAN weights are fetched and the restorer is constructed at module
# scope so the (slow) load happens once at startup, not on every request.
MODEL_PATH = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth'

# Skip the download when the weights file is already present on disk.
# (On Hugging Face Spaces repo files persist; locally this avoids a
# re-download after a restart. No integrity check is performed.)
local_model_path = "GFPGANv1.3.pth"
if os.path.exists(local_model_path):
    print(f"Model {local_model_path} already exists.")
else:
    print(f"Downloading model to {local_model_path}...")
    torch.hub.download_url_to_file(MODEL_PATH, local_model_path, progress=True)
    print("Model download complete.")


# Face restorer used by the prediction function below.
restorer = GFPGANer(
    model_path=local_model_path, # Use local path after download
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None, # Can be 'realesrgan' if RealESRGAN is installed and background upsampling is desired
    device='cpu'
)

def gfpgan_restore_face(input_image_np):
    """
    Restore faces in an input image using GFPGAN.

    Args:
        input_image_np: NumPy array of the uploaded image. Gradio's
            ``gr.Image(type="numpy")`` delivers channels in RGB order
            (not BGR as ``cv2.imdecode`` would), or None if no image
            was provided.

    Returns:
        NumPy array of the restored image in RGB order. If GFPGAN
        produces no restored frame (e.g. no faces detected), the
        original input is returned unchanged.

    Raises:
        gr.Error: if the input is missing or GFPGAN processing fails.
    """
    if input_image_np is None:
        raise gr.Error("Error: Could not read input image. Please upload a valid image.")

    # BUG FIX: Gradio supplies RGB, but GFPGAN (built on OpenCV) expects BGR.
    # Without this swap, restored faces come back with red/blue channels
    # exchanged. Grayscale (2-D) arrays are passed through as-is.
    if input_image_np.ndim == 3 and input_image_np.shape[2] == 3:
        bgr_image = input_image_np[:, :, ::-1].copy()  # RGB -> BGR, contiguous
    else:
        bgr_image = input_image_np

    try:
        # enhance() returns (cropped_faces, restored_faces, restored_img);
        # restored_img is the full frame with restored faces pasted back.
        _, _, restored_img = restorer.enhance(
            bgr_image,
            has_aligned=False,
            only_center_face=False
        )
    except Exception as e:
        print(f"Error during GFPGAN processing: {e}")
        # Chain the original exception so the server log keeps the traceback.
        raise gr.Error(f"GFPGAN processing failed: {e}. Check server logs for details.") from e

    if restored_img is None:
        # No faces detected or nothing restored: return the untouched input
        # rather than failing the whole request.
        print("No faces were detected or restored by GFPGAN.")
        return input_image_np

    # Convert back to RGB for Gradio's numpy output component.
    if restored_img.ndim == 3 and restored_img.shape[2] == 3:
        return restored_img[:, :, ::-1].copy()  # BGR -> RGB
    return restored_img

# Define the Gradio interface.
# type="numpy" hands the prediction function a NumPy array (RGB order per
# Gradio's Image component) and expects a NumPy array back.
# NOTE(review): `allow_flagging` is deprecated in Gradio 4.x in favor of
# `flagging_mode` — confirm the installed Gradio version before upgrading.
iface = gr.Interface(
    fn=gfpgan_restore_face,
    inputs=gr.Image(type="numpy", label="Upload Input Image"),
    outputs=gr.Image(type="numpy", label="Restored Output Image"),
    title="GFPGAN Face Restoration (CPU)",
    description="Upload an image with faces to restore them using GFPGAN. Runs on CPU, so it might be slow for large images.",
    allow_flagging="never"
)

if __name__ == '__main__':
    # For Hugging Face Spaces, you typically don't need app.launch() in app.py
    # The Space will run it. For local testing:
    iface.launch()