File size: 4,201 Bytes
9549cef
 
 
 
 
70ac0f8
9549cef
 
62ca108
9549cef
 
c1762c2
9549cef
db17d5e
9549cef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36bd960
9549cef
a7a7bb6
c74635b
36bd960
c74635b
 
 
a09ed1b
c74635b
 
 
 
9549cef
a7a7bb6
9549cef
 
7279d73
9549cef
 
 
 
36bd960
9549cef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import gradio as gr
import os
hf_token = os.environ.get("HF_TOKEN")
import spaces
import torch
from pipeline_bria import BriaPipeline, BriaTransformer2DModel
import time

# Resolution presets offered in the UI dropdown, each a "width height" string
# that infer() later splits and casts to ints.
resolutions = ["1024 1024","1280 768","1344 768","768 1344","768 1280"] 

# Negative prompt prefilled in the UI: common artifacts and anatomy defects
# to steer generation away from.
default_negative_prompt= "Logo,Watermark,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"

# Load the BRIA 3.2 text-to-image pipeline once at import time and move it to
# the GPU; every request handled by infer() reuses this single pipeline.
# NOTE(review): hf_token is read from the environment above but never passed
# here — confirm whether the checkpoint is public or the token is needed.
pipe = BriaPipeline.from_pretrained("briaai/BRIA-3.2",revision="pre_diffusers_support", torch_dtype=torch.bfloat16,trust_remote_code=True)
pipe.to(device="cuda")

@spaces.GPU(enable_queue=True)
def infer(prompt, negative_prompt, seed, resolution):
    """Generate one image with the module-level BRIA pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Comma-separated concepts to steer generation away from.
        seed: Seed as a string or int. "-1" / -1 or any unparsable value means
            "random" (no fixed generator). The Gradio Textbox passes a string.
        resolution: "width height" string, e.g. "1024 1024".

    Returns:
        The first generated PIL image.
    """
    # Log the incoming prompt (original had a garbled "—/n" escape here).
    print(f"\n{prompt}\n")

    t = time.time()

    # Build a seeded CUDA generator only for an explicit, valid seed.
    # str(...).strip() also handles an int -1 and stray whitespace, which the
    # original string-only comparison missed.
    generator = None
    if str(seed).strip() != "-1":
        try:
            generator = torch.Generator("cuda").manual_seed(int(seed))
        except (TypeError, ValueError):
            # Narrowed from a bare `except:`; anything non-numeric falls back
            # to a nondeterministic run, matching the original intent.
            generator = None

    w, h = (int(v) for v in resolution.split())
    image = pipe(
        prompt,
        num_inference_steps=30,
        negative_prompt=negative_prompt,
        generator=generator,
        width=w,
        height=h,
    ).images[0]
    print(f'gen time is {time.time()-t} secs')

    # Future:
    # - expose the number of inference steps in the UI
    # - NSFW check: raise gr.Error("Generated image is NSFW")

    return image

css = """
#col-container{
    margin: 0 auto;
    max-width: 580px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## BRIA 3.2")
        gr.HTML('''
         <p style="margin-bottom: 10px; font-size: 94%">
            This is a demo for 
            <a href="https://huggingface.co/briaai/BRIA-3.2" target="_blank">BRIA 3.2 text-to-image </a>. 
            is our latest commercial-ready text-to-image model that significantly improves aesthetics and excels at rendering clear, readable text, particularly optimized for short phrases (1-6 words) while still trained on licensed data, and so provide full legal liability coverage for copyright and privacy infringement.
          </p>
          <p style="margin-bottom: 10px; font-size: 94%">
            API Endpoint available on: <a href="https://docs.bria.ai/image-generation/endpoints/text-to-image-base" target="_blank">Bria.ai</a>. <a href="https://fal.ai/models/bria/text-to-image/3.2" target="_blank">Fal.ai</a>. <a href="https://replicate.com/bria/image-3.2" target="_blank">Replicate.com</a>.
          </p>
        <p style="margin-bottom: 10px; font-size: 94%">
            ComfyUI node is available here: <a href="https://github.com/Bria-AI/ComfyUI-BRIA-API" target="_blank">ComfyUI Node</a>.
          </p>
        ''')
        
        with gr.Group():
            with gr.Column():
                prompt_in = gr.Textbox(label="Prompt", value="""photo of mystical dragon eating sushi, text bubble says "Sushi Time".""")
                resolution = gr.Dropdown(value=resolutions[0], show_label=True, label="Resolution", choices=resolutions)
                seed = gr.Textbox(label="Seed", value=-1)
                negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
                submit_btn = gr.Button("Generate")
        result = gr.Image(label="BRIA-3.2 Result")

        # gr.Examples(
        #     examples = [ 
        #         "Dragon, digital art, by Greg Rutkowski",
        #         "Armored knight holding sword",
        #         "A flat roof villa near a river with black walls and huge windows",
        #         "A calm and peaceful office",
        #         "Pirate guinea pig"
        #     ],
        #     fn = infer, 
        #     inputs = [
        #         prompt_in
        #     ],
        #     outputs = [
        #         result
        #     ]
        # )

    submit_btn.click(
        fn = infer,
        inputs = [
            prompt_in,
            negative_prompt,
            seed,
            resolution
        ],
        outputs = [
            result
        ]
    )
demo.queue().launch(show_api=False)