|
|
import gradio as gr |
|
|
import os |
|
|
hf_token = os.environ.get("HF_TOKEN") |
|
|
import spaces |
|
|
import torch |
|
|
from pipeline_bria import BriaPipeline, BriaTransformer2DModel |
|
|
import time |
|
|
|
|
|
# Output sizes supported by the demo, as "WIDTH HEIGHT" strings; the first
# entry is the UI default. Parsed back into ints by infer().
resolutions = ["1024 1024","1280 768","1344 768","768 1344","768 1280"]

# Comma-separated list of common generation artifacts, pre-filled into the
# negative-prompt textbox so users start from a sensible baseline.
default_negative_prompt= "Logo,Watermark,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"

# Load the BRIA 3.2 pipeline once at startup and keep it resident on the GPU.
# trust_remote_code is required because the pipeline class lives in the model
# repo; bfloat16 halves memory versus float32.
pipe = BriaPipeline.from_pretrained("briaai/BRIA-3.2",revision="pre_diffusers_support", torch_dtype=torch.bfloat16,trust_remote_code=True)

pipe.to(device="cuda")
|
|
|
|
|
@spaces.GPU(enable_queue=True)
def infer(prompt, negative_prompt, seed, resolution):
    """Generate one image with the BRIA 3.2 pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Concepts to steer the generation away from.
        seed: Seed string from the UI textbox; "-1" (or any value that does
            not parse as an int) selects a nondeterministic seed.
        resolution: "WIDTH HEIGHT" string, e.g. "1024 1024".

    Returns:
        The generated PIL image.
    """
    # Debug trace of incoming prompts in the Space logs.
    print(f"""
—\n
{prompt}
""")

    t = time.time()

    # "-1" explicitly requests a random seed; an unparsable value falls back
    # to a random seed as well instead of failing the whole request.
    generator = None
    if seed != "-1":
        try:
            generator = torch.Generator("cuda").manual_seed(int(seed))
        except (ValueError, TypeError):
            generator = None

    w, h = (int(v) for v in resolution.split())

    image = pipe(
        prompt,
        num_inference_steps=30,
        negative_prompt=negative_prompt,
        generator=generator,
        width=w,
        height=h,
    ).images[0]

    print(f'gen time is {time.time()-t} secs')

    return image
|
|
|
|
|
# CSS: center the demo's main column and cap its width.
css = """
#col-container{
    margin: 0 auto;
    max-width: 580px;
}
"""

# Build the UI. Widget creation order determines on-screen layout, so the
# statement order below must be preserved.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## BRIA 3.2")
        # Static description and links (model card, hosted API endpoints,
        # ComfyUI node).
        gr.HTML('''
        <p style="margin-bottom: 10px; font-size: 94%">
        This is a demo for
        <a href="https://huggingface.co/briaai/BRIA-3.2" target="_blank">BRIA 3.2 text-to-image </a>.
        is our latest commercial-ready text-to-image model that significantly improves aesthetics and excels at rendering clear, readable text, particularly optimized for short phrases (1-6 words) while still trained on licensed data, and so provide full legal liability coverage for copyright and privacy infringement.
        </p>
        <p style="margin-bottom: 10px; font-size: 94%">
        API Endpoint available on: <a href="https://docs.bria.ai/image-generation/endpoints/text-to-image-base" target="_blank">Bria.ai</a>. <a href="https://fal.ai/models/bria/text-to-image/3.2" target="_blank">Fal.ai</a>. <a href="https://replicate.com/bria/image-3.2" target="_blank">Replicate.com</a>.
        </p>
        <p style="margin-bottom: 10px; font-size: 94%">
        ComfyUI node is available here: <a href="https://github.com/Bria-AI/ComfyUI-BRIA-API" target="_blank">ComfyUI Node</a>.
        </p>
        ''')

        with gr.Group():
            with gr.Column():
                # User inputs: prompt, output resolution, seed ("-1" = random),
                # and the negative prompt pre-filled with the artifact list.
                prompt_in = gr.Textbox(label="Prompt", value="""photo of mystical dragon eating sushi, text bubble says "Sushi Time".""")
                resolution = gr.Dropdown(value=resolutions[0], show_label=True, label="Resolution", choices=resolutions)
                seed = gr.Textbox(label="Seed", value=-1)
                negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
                submit_btn = gr.Button("Generate")
        result = gr.Image(label="BRIA-3.2 Result")

    # Wire the button to the GPU inference function.
    submit_btn.click(
        fn = infer,
        inputs = [
            prompt_in,
            negative_prompt,
            seed,
            resolution
        ],
        outputs = [
            result
        ]
    )

# queue() serializes GPU requests; show_api=False hides the auto-generated
# API docs page.
demo.queue().launch(show_api=False)