# Hugging Face Spaces page metadata (scrape artifact, kept as a comment):
#   Space status: Sleeping · file size: 1,889 bytes
# Standard library
import os
import shutil

# Third-party
import gradio as gr
import torch

# Project-local
from MangaTranslator import MangaTranslator
# Monkey-patch torch.jit.load so every TorchScript checkpoint is mapped to
# CPU, even when a downstream library asks for CUDA (this demo has no GPU).
_original_jit_load = torch.jit.load


def _safe_jit_load(f, map_location=None, _extra_files=None, **kwargs):
    """Drop-in replacement for ``torch.jit.load`` that forces CPU mapping.

    Args:
        f: File-like object, filename, or path accepted by ``torch.jit.load``.
        map_location: Accepted for signature compatibility but deliberately
            ignored — loading is always forced onto the CPU.
        _extra_files: Forwarded unchanged to the original loader.
        **kwargs: Any additional keyword arguments ``torch.jit.load``
            supports are forwarded unchanged.

    Returns:
        The loaded ``ScriptModule``, mapped to ``torch.device('cpu')``.
    """
    return _original_jit_load(
        f,
        map_location=torch.device("cpu"),
        _extra_files=_extra_files,
        **kwargs,
    )


torch.jit.load = _safe_jit_load
# ------------------------------------------------------------------
# 1. Build the translation pipeline once at module import (~30 s).
# ------------------------------------------------------------------
print("⏳ Initializing models... (This takes 30s)")
translator = MangaTranslator(
    font_path="font.ttf",
    yolo_model_path="comic_yolov8m.pt",
    translation_model="LiquidAI/LFM2-350M-ENJP-MT",
)
print("✅ Models Ready!")
# ------------------------------------------------------------------
# 2. Gradio callback: translate one uploaded manga page.
# ------------------------------------------------------------------
def translate_manga(input_image):
    """Run the translation pipeline on a single uploaded page.

    Args:
        input_image: PIL image from the Gradio upload widget, or None
            when the input is empty/cleared.

    Returns:
        Filepath of the translated page on success, the original PIL
        image if the pipeline fails (best-effort fallback), or None
        when there was no input.
    """
    if input_image is None:
        return None

    # Fixed scratch paths in the working directory.
    # NOTE(review): concurrent requests would clobber these files —
    # acceptable for a single-user demo Space, not for production.
    temp_in = "temp_input.jpg"
    temp_out = "temp_output.jpg"

    # The pipeline reads from disk, so persist the upload first.
    # JPEG cannot store an alpha channel; normalize to RGB so RGBA/P-mode
    # uploads (e.g. PNGs with transparency) don't crash the save.
    input_image.convert("RGB").save(temp_in)

    try:
        translator.process_single_image(
            image_path=temp_in,
            output_path=temp_out,
            series_info=None,  # No context for the demo
        )
        return temp_out
    except Exception as e:
        # Best-effort demo behavior: log and show the untranslated page.
        print(f"Error: {e}")
        return input_image  # Return original if fail
# ------------------------------------------------------------------
# 3. Wire up the web UI and start the server (script entry point).
# ------------------------------------------------------------------
if __name__ == "__main__":
    demo = gr.Interface(
        fn=translate_manga,
        inputs=gr.Image(type="pil", label="Upload Manga Page (Japanese)"),
        outputs=gr.Image(type="filepath", label="Translated Page (English)"),
        title="✨ LiquidAI Manga Translator (350M Demo)",
        description="Running natively on CPU using Liquid LFM2-350M, MangaOCR, and LaMa Inpainting.",
        examples=[["example.jpg"]],  # Optional: shown when example.jpg exists
    )
    demo.launch()