# Hugging Face Spaces demo app — LiquidAI Manga Translator
import os
import shutil

import gradio as gr
import torch

from MangaTranslator import MangaTranslator

# Monkeypatch torch.jit.load so TorchScript models (e.g. the LaMa inpainter)
# always load onto CPU, even if they were saved from a CUDA machine and a
# library asks for a GPU mapping. Spaces CPU hardware has no CUDA device.
_original_jit_load = torch.jit.load


def _safe_jit_load(f, map_location=None, _extra_files=None, **kwargs):
    """Drop-in replacement for ``torch.jit.load`` that forces CPU mapping.

    The caller-supplied ``map_location`` is deliberately ignored; all other
    arguments (including future/private keywords such as ``_restore_shapes``)
    are forwarded unchanged to the original implementation.
    """
    return _original_jit_load(
        f,
        map_location=torch.device('cpu'),
        _extra_files=_extra_files,
        **kwargs,
    )


torch.jit.load = _safe_jit_load
# 1. Initialize the Translator (runs once on startup)
print("⏳ Initializing models... (This takes 30s)")

# Model assets: YOLO panel/bubble detector, LFM2 translation model, and the
# TTF font used to typeset the English text back onto the page.
_YOLO_WEIGHTS = 'comic_yolov8m.pt'
_TRANSLATION_MODEL = "LiquidAI/LFM2-350M-ENJP-MT"
_FONT_FILE = "font.ttf"

translator = MangaTranslator(
    yolo_model_path=_YOLO_WEIGHTS,
    translation_model=_TRANSLATION_MODEL,
    font_path=_FONT_FILE,
)

print("✅ Models Ready!")
| # 2. Define the Processing Function | |
# 2. Define the Processing Function
def translate_manga(input_image):
    """Translate a single manga page and return the rendered result.

    Args:
        input_image: PIL image from the Gradio ``Image`` input, or ``None``
            when the user submits without uploading anything.

    Returns:
        Path to the translated JPEG on success, the original PIL image as a
        best-effort fallback if the pipeline fails, or ``None`` for no input.
    """
    if input_image is None:
        return None

    # Fixed temp paths for the file-based pipeline (single-user demo, so a
    # shared name is acceptable; a concurrent deployment should use tempfile).
    temp_in = "temp_input.jpg"
    temp_out = "temp_output.jpg"

    # JPEG cannot store an alpha channel: PNG uploads arrive as RGBA/P and
    # would make Image.save raise OSError, so normalize to RGB first.
    if input_image.mode != "RGB":
        input_image = input_image.convert("RGB")
    input_image.save(temp_in)

    # Run the existing pipeline; on any failure, log and fall back to the
    # untranslated page rather than crashing the demo.
    try:
        translator.process_single_image(
            image_path=temp_in,
            output_path=temp_out,
            series_info=None  # No context for the demo
        )
        return temp_out
    except Exception as e:
        print(f"Error: {e}")
        return input_image  # Return original if fail
# 3. Launch the Interface
if __name__ == "__main__":
    demo_kwargs = dict(
        fn=translate_manga,
        inputs=gr.Image(type="pil", label="Upload Manga Page (Japanese)"),
        outputs=gr.Image(type="filepath", label="Translated Page (English)"),
        title="✨ LiquidAI Manga Translator (350M Demo)",
        description="Running natively on CPU using Liquid LFM2-350M, MangaOCR, and LaMa Inpainting.",
        # Optional: If you upload an example image
        examples=[["example.jpg"]],
    )
    iface = gr.Interface(**demo_kwargs)
    iface.launch()