import spaces
import gradio as gr
import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download
from PIL import Image
import requests
from translatepy import Translator
import numpy as np
import random
# Translator used to normalize multilingual prompts to English.
translator = Translator()

# Model configuration.
model = "Shakker-Labs/AWPortrait-FL"
MAX_SEED = np.iinfo(np.int32).max

# Pick hardware and numeric precision: bfloat16 on GPU, float32 on CPU.
if torch.cuda.is_available():
    device, dtype = "cuda", torch.bfloat16
else:
    device, dtype = "cpu", torch.float32

# Load the Flux pipeline once at start-up and move it to the chosen device.
pipe = FluxPipeline.from_pretrained(model, torch_dtype=dtype)
pipe.to(device)
# Image generation function
@spaces.GPU()
def generate_image(
    prompt,
    width=768,
    height=1024,
    scale=3.5,
    steps=24,
    seed=-1,
    nums=1,
    progress=gr.Progress(track_tqdm=True)
):
    """Translate *prompt* to English and run the Flux pipeline.

    Args:
        prompt: User prompt in any language; translated to English first.
        width / height: Output resolution in pixels.
        scale: Guidance scale forwarded to the pipeline.
        steps: Number of inference steps.
        seed: RNG seed; -1 picks a random seed in [0, MAX_SEED].
        nums: Number of images to generate per prompt.
        progress: Gradio progress tracker (tracks tqdm from diffusers).

    Returns:
        (images, seed): list of PIL images and the seed actually used.
    """
    # Gradio sliders deliver floats; diffusers expects integer pixel
    # dimensions, step counts and image counts — coerce them explicitly.
    width, height, steps, nums = int(width), int(height), int(steps), int(nums)
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    generator = torch.Generator(device=device).manual_seed(seed)
    # translatepy returns a Translation object; str() yields the English text.
    prompt = str(translator.translate(prompt, 'English'))
    print(f'Prompt: {prompt}')
    images = pipe(
        prompt,
        width=width,
        height=height,
        guidance_scale=scale,
        num_inference_steps=steps,
        generator=generator,
        output_type="pil",
        max_sequence_length=512,
        num_images_per_prompt=nums,
    ).images
    return images, seed
# Example prompts pre-filled into the prompt textbox via gr.Examples below.
# NOTE: these strings are runtime data (cached example keys) — keep verbatim.
examples = [
    "close up portrait, Amidst the interplay of light and shadows in a photography studio,a soft spotlight traces the contours of a face,highlighting a figure clad in a sleek black turtleneck. The garment,hugging the skin with subtle luxury,complements the Caucasian model's understated makeup,embodying minimalist elegance.",
    "Caucasian,The image features a young woman of European descent standing in an studio setting,surrounded by silk. (She is wearing a silk dress),paired with a bold. Her brown hair is wet and tousled,falling naturally around her face,giving her a raw and edgy look.",
    "A black and white portrait of a young woman with a captivating gaze. She's bundled up in a cozy black sweater,hands gently cupped near her face.",
    "Fashion photography portrait,close up portrait,(a woman of European descent is surrounded by lava rock and magma from head to neck, red magma hair, wear volcanic lava rock magma outfit coat lava rock magma fashion costume with ruffled layers"
]
# Gradio Interface
css = """
footer {
    visibility: hidden;
}
"""

with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(label='Enter Your Prompt (multilingual)', scale=6)
            # Give the button a visible label — it previously rendered empty.
            submit = gr.Button('Generate', scale=1, variant='primary')
    img = gr.Gallery(label="Gallery", columns=1, preview=True)
    with gr.Accordion("Advanced Options", open=False):
        with gr.Row():
            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=768)
            height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024)
        with gr.Row():
            scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, step=0.1, value=3.5)
            steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=24)
        with gr.Row():
            # Default to -1 so the "-1 = Random" promise in the label holds;
            # the old default of 0 silently pinned every run to seed 0.
            seed = gr.Slider(label="Seed (-1 = Random)", minimum=-1, maximum=MAX_SEED, step=1, value=-1, visible=True)
            nums = gr.Slider(label="Image Numbers", minimum=1, maximum=4, step=1, value=1, scale=1)
    # Cached example prompts; remaining generate_image args use their defaults.
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[img, seed],
        fn=generate_image,
        cache_examples="lazy",
    )
    # Run generation on either textbox submit or button click.
    gr.on(
        triggers=[prompt.submit, submit.click],
        fn=generate_image,
        inputs=[prompt, width, height, scale, steps, seed, nums],
        outputs=[img, seed],
        api_name="run",
    )

demo.queue().launch()