# NOTE(review): "Spaces: Sleeping" below is Hugging Face Spaces page residue
# from a copy-paste (status banner), commented out so the file is valid Python.
# Spaces: Sleeping / Sleeping
import os

import gradio as gr
import torch
from diffusers import FluxPriorReduxPipeline, FluxPipeline
from huggingface_hub import login
from PIL import Image

# Authenticate against the Hugging Face Hub — the FLUX.1 checkpoints are gated.
# Only call login() when the secret is actually present: login(None) fails
# immediately, whereas skipping it lets the app start and surface a clear 401
# at model-download time instead.
_hf_token = os.getenv("HF_API_KEY")
if _hf_token:
    login(_hf_token)

# Model loading is deferred to first use inside process_image() (lazy loading).
# Cache for the two diffusion pipelines: they are several GB each, so they are
# loaded lazily on the first request and then reused, instead of being
# re-downloaded / re-instantiated on every call as before.
_PIPELINE_CACHE = {}


def _load_pipelines():
    """Return (prior_redux_pipeline, flux_pipeline), loading them on first call.

    Both pipelines are kept on CPU. `token=True` replaces the deprecated
    `use_auth_token=True` accepted by older diffusers releases.
    """
    if "prior" not in _PIPELINE_CACHE:
        _PIPELINE_CACHE["prior"] = FluxPriorReduxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-Redux-dev",
            token=True,
        ).to("cpu")
        _PIPELINE_CACHE["flux"] = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            token=True,
            # Conditioning comes from the Redux prior output, not from text
            # prompts, so the text encoders are dropped to save memory.
            text_encoder=None,
            text_encoder_2=None,
        ).to("cpu")
    return _PIPELINE_CACHE["prior"], _PIPELINE_CACHE["flux"]


def process_image(image_path):
    """Generate an image variation of the input using FLUX.1 Redux on CPU.

    Parameters
    ----------
    image_path : str
        Filesystem path to the input image.

    Returns
    -------
    PIL.Image.Image
        The first generated image.
    """
    # Load the input and downscale to 256x256 to keep CPU memory usage modest.
    image = Image.open(image_path).convert("RGB")
    image = image.resize((256, 256))

    pipe_prior_redux, pipe = _load_pipelines()

    # The prior turns the image into the conditioning inputs expected by FLUX.
    pipe_prior_output = pipe_prior_redux(image)

    images = pipe(
        guidance_scale=2.5,
        num_inference_steps=25,  # reduced step count to save time on CPU
        generator=torch.Generator("cpu").manual_seed(0),  # reproducible output
        **pipe_prior_output,
    ).images
    return images[0]
def infer(image):
    """Gradio callback: forward the uploaded image path to the FLUX pipeline."""
    return process_image(image)
# --- Gradio interface ------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# FLUX Image Generation App (Optimized for CPU)")
    with gr.Row():
        in_image = gr.Image(type="filepath", label="Input Image")
        out_image = gr.Image(type="pil", label="Generated Image")
    generate_btn = gr.Button("Generate")
    # Wire the button to the inference callback.
    generate_btn.click(fn=infer, inputs=[in_image], outputs=[out_image])

demo.launch()