import gradio as gr
import numpy as np
import random
import io
import tempfile
import zipfile
from PIL import Image, ImageEnhance
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
# Configuration
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the model
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype).to(device)
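# Note (assumption, not part of the original app): FLUX.1-dev is large, and moving the
# whole pipeline onto a single GPU can exhaust memory. If the Space runs out of VRAM,
# diffusers' CPU offloading is one option to try in place of .to(device):
# pipe.enable_model_cpu_offload()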
# Constants
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
# Define the inference function
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, num_variations=1, brightness=1.0, contrast=1.0, saturation=1.0, style="Style1"):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    images = []
    for _ in range(num_variations):
        image = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale
        ).images[0]
        # Apply image adjustments
        image = adjust_image(image, brightness, contrast, saturation)
        images.append(image)
    # Apply style (dummy implementation, replace with actual style application)
    images = [apply_style(img, style) for img in images]
    return images, seed
def adjust_image(image, brightness, contrast, saturation):
    # PIL ImageEnhance factors: 1.0 leaves the image unchanged, <1.0 reduces, >1.0 boosts
    enhancer = ImageEnhance.Brightness(image)
    image = enhancer.enhance(brightness)
    enhancer = ImageEnhance.Contrast(image)
    image = enhancer.enhance(contrast)
    enhancer = ImageEnhance.Color(image)
    image = enhancer.enhance(saturation)
    return image
def apply_style(image, style):
    # Dummy style application
    return image
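# A minimal sketch of what apply_style could look like, assuming the three dropdown
# choices map to simple PIL post-processing (grayscale / autocontrast). This is an
# illustration only, not the style behaviour intended by the original app:
#
# from PIL import ImageOps
#
# def apply_style(image, style):
#     if style == "Style2":
#         return ImageOps.grayscale(image).convert("RGB")
#     if style == "Style3":
#         return ImageOps.autocontrast(image)
#     return image  # "Style1" leaves the image untouched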
def download_all(images):
    # Write all gallery images into a zip archive on disk and return its path,
    # which is the value a gr.File output expects.
    tmp = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
    with zipfile.ZipFile(tmp.name, 'w') as zipf:
        for i, item in enumerate(images):
            # Gallery values may arrive as (image, caption) pairs; unwrap them
            img = item[0] if isinstance(item, (tuple, list)) else item
            if isinstance(img, str):
                # Already a file path on disk
                zipf.write(img, arcname=f'image_{i}.png')
            else:
                img_byte_arr = io.BytesIO()
                img.save(img_byte_arr, format="PNG")
                zipf.writestr(f'image_{i}.png', img_byte_arr.getvalue())
    return tmp.name
# Gradio interface
css = """
#col-container {
    margin: 0 auto;
    max-width: 720px;
}
"""
examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# FLUX.1 [dev]
12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
""")
        with gr.Row():
            prompt = gr.Textbox(label="Prompts (comma-separated)", placeholder="Enter multiple prompts separated by commas", lines=2)
            run_button = gr.Button("Run", scale=0)
        # Gallery height is set in the constructor; .style() was removed in recent Gradio releases
        result = gr.Gallery(label="Image Gallery", height=400)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=15, step=0.1, value=3.5)
                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28)
            with gr.Row():
                num_variations = gr.Slider(label="Number of Variations", minimum=1, maximum=10, step=1, value=1)
                brightness = gr.Slider(label="Brightness", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
                contrast = gr.Slider(label="Contrast", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
                saturation = gr.Slider(label="Saturation", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
            style = gr.Dropdown(label="Select Style", choices=["Style1", "Style2", "Style3"], value="Style1")
        download_all_button = gr.Button("Download All")
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples="lazy"
        )
    def run_inference(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_variations, brightness, contrast, saturation, style):
        images, seed = infer(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_variations, brightness, contrast, saturation, style)
        return images, seed
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=run_inference,
        inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_variations, brightness, contrast, saturation, style],
        outputs=[result, seed]
    )
    def download_all_callback(images):
        return download_all(images)

    # Declare the output file component explicitly instead of constructing it inline
    download_file = gr.File(label="Download All Images")
    download_all_button.click(fn=download_all_callback, inputs=[result], outputs=[download_file])

demo.launch()
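# Rough dependency sketch (an assumption based on the imports above, not a file from
# the original Space); a requirements.txt along these lines is typically needed:
#   gradio, torch, diffusers, transformers, Pillow, numpy
#   (plus accelerate if the optional CPU offloading noted earlier is enabled)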