import os

import cv2
import gradio as gr
import numpy as np
import onnxruntime as ort
from huggingface_hub import hf_hub_download


# --- MODEL SETUP (APISR RRDB GAN - High Fidelity) ---
def load_model():
    """Download the 2x APISR RRDB GAN ONNX generator and open a CPU session.

    APISR RRDB gives noticeably better edge reconstruction than base
    Real-ESRGAN for anime/illustration content.
    """
    path = hf_hub_download(
        repo_id="Xenova/2x_APISR_RRDB_GAN_generator-onnx",
        filename="onnx/model.onnx",
    )
    opts = ort.SessionOptions()
    opts.intra_op_num_threads = 2  # keep CPU usage polite on shared hosts
    return ort.InferenceSession(path, opts, providers=["CPUExecutionProvider"])


session = load_model()


def upscale_image_tiled(frame, tile_size=128, overlap=16):
    """Upscale a BGR uint8 image 2x by running the model over overlapping tiles.

    Args:
        frame: HxWx3 uint8 image in OpenCV BGR channel order.
        tile_size: nominal tile edge fed to the network.
        overlap: context margin added on each side of a tile; the margin is
            discarded when pasting, which hides seams between tiles.

    Returns:
        (2H)x(2W)x3 uint8 BGR image.

    Raises:
        ValueError: if tile_size <= 2 * overlap (stride would be non-positive).
    """
    h, w, c = frame.shape
    output_h, output_w = h * 2, w * 2
    upscaled_img = np.zeros((output_h, output_w, c), dtype=np.uint8)

    stride = tile_size - (overlap * 2)
    if stride <= 0:
        # range(0, h, 0) would raise an opaque error; fail with a clear message.
        raise ValueError("tile_size must exceed 2 * overlap")

    input_name = session.get_inputs()[0].name  # hoisted loop invariant

    for y in range(0, h, stride):
        for x in range(0, w, stride):
            # Tile bounds in the source image, extended by `overlap` context.
            y1, y2 = max(0, y - overlap), min(h, y + stride + overlap)
            x1, x2 = max(0, x - overlap), min(w, x + stride + overlap)
            tile = frame[y1:y2, x1:x2]

            # --- MANDATORY APISR FIX: Pad to Multiple of 8 ---
            # The network requires spatial dims divisible by 8; reflect-pad
            # on the bottom/right and crop the padding back off afterwards.
            th, tw = tile.shape[:2]
            pad_h = (8 - (th % 8)) % 8
            pad_w = (8 - (tw % 8)) % 8
            if pad_h > 0 or pad_w > 0:
                tile = cv2.copyMakeBorder(
                    tile, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT
                )
            # ------------------------------------------------

            # AI inference: BGR->RGB, NCHW float32 in [0, 1].
            img_input = cv2.cvtColor(tile, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
            img_input = np.transpose(img_input, (2, 0, 1))[np.newaxis, :]
            output = session.run(None, {input_name: img_input})[0]

            # Post-process tile: NCHW float -> HWC uint8 BGR.
            tile_out = np.clip(np.squeeze(output), 0, 1).transpose(1, 2, 0)
            tile_out = (tile_out * 255.0).astype(np.uint8)
            tile_out = cv2.cvtColor(tile_out, cv2.COLOR_RGB2BGR)

            # Drop the divisible-by-8 padding, keeping only the 2x of the
            # original (unpadded) tile.
            tile_out = tile_out[:(th * 2), :(tw * 2)]

            # Offsets of the stride-sized core within the upscaled tile,
            # and its destination rectangle in the output canvas.
            oy1 = (y - y1) * 2
            ox1 = (x - x1) * 2
            py1, py2 = y * 2, min(output_h, (y + stride) * 2)
            px1, px2 = x * 2, min(output_w, (x + stride) * 2)
            upscaled_img[py1:py2, px1:px2] = tile_out[
                oy1 : oy1 + (py2 - py1), ox1 : ox1 + (px2 - px1)
            ]

    return upscaled_img


def run_upscale(img_data, sharpen_amount):
    """Gradio handler: 2x-upscale the edited image, then optionally sharpen.

    Args:
        img_data: gr.ImageEditor payload (dict with a "composite" RGB/RGBA
            numpy array) or None.
        sharpen_amount: unsharp-mask strength in [0, 1]; 0 disables it.

    Returns:
        RGB uint8 numpy array for gr.Image, or None if no input.
    """
    if img_data is None:
        return None
    img = img_data["composite"]

    # Normalize to 3-channel RGB. Guard ndim first: a grayscale 2-D array
    # would make img.shape[2] raise IndexError.
    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    elif img.shape[2] == 4:
        img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)

    # Gradio supplies RGB, but upscale_image_tiled expects OpenCV BGR.
    # Without this swap the model sees R/B-swapped colors.
    bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    # AI Upscale (2x APISR)
    res = upscale_image_tiled(bgr)

    # Sharpening for that "Pro" bite (unsharp mask).
    if sharpen_amount > 0:
        blurred = cv2.GaussianBlur(res, (0, 0), 3)
        res = cv2.addWeighted(res, 1 + sharpen_amount, blurred, -sharpen_amount, 0)

    # Back to RGB for display in gr.Image.
    return cv2.cvtColor(res, cv2.COLOR_BGR2RGB)


# --- UI ---
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
    gr.Markdown("## 💎 Pro APISR-RRDB Upscaler (CPU Optimized)")
    gr.Markdown("Uses the advanced APISR engine for cleaner edges and better texture restoration.")
    with gr.Row():
        with gr.Column():
            img_in = gr.ImageEditor(label="Input (Crop allowed)", type="numpy")
            sharp_slider = gr.Slider(0, 1, value=0.15, label="Sharpness Strength")
            btn = gr.Button("UPSCALE 2X", variant="primary")
        with gr.Column():
            img_out = gr.Image(label="High Fidelity Result")
    btn.click(run_upscale, [img_in, sharp_slider], img_out)

if __name__ == "__main__":
    demo.queue().launch()