Mmanikandan committed on
Commit
947e82f
·
verified ·
1 Parent(s): 7f17593

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -154
app.py CHANGED
@@ -1,160 +1,45 @@
1
  import gradio as gr
2
- import numpy as np
3
  import torch
4
- from PIL import Image
5
  from diffusers import StableDiffusionPipeline
6
- from transformers import pipeline, set_seed
7
- import random
8
- import re
9
 
10
# Hugging Face repo id of the Stable Diffusion checkpoint to load.
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

# Text-to-image pipeline, pinned to CPU (no GPU assumed on this host).
pipe = StableDiffusionPipeline.from_pretrained(model_id).to('cpu')

# Two GPT-2-based prompt generators; used by infer1 and infer2 below.
gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
gpt2_pipe2 = pipeline('text-generation', model='succinctly/text2image-prompt-generator')
17
def infer1(starting_text):
    """Generate up to four prompt suggestions with the MagicPrompt GPT-2 model.

    Args:
        starting_text: Seed text for the prompt generator; may be empty,
            in which case the model free-generates.

    Returns:
        Newline-separated generated prompts, or "" when every candidate
        was filtered out.
    """
    # Fresh seed on every call so repeated clicks yield new completions.
    seed = random.randint(100, 1000000)
    set_seed(seed)

    # NOTE(review): the original ran re.sub(r"[,:\-–.!;?_]", '', starting_text)
    # only when starting_text == "" — a no-op on the empty string — so that
    # dead branch has been removed without changing behavior.

    response = gpt2_pipe(
        starting_text,
        max_length=(len(starting_text) + random.randint(60, 90)),
        num_return_sequences=4,
    )

    response_list = []
    for x in response:
        resp = x['generated_text'].strip()
        # Keep only completions that genuinely extend the input and do not
        # end on a dangling separator character.
        if (resp != starting_text
                and len(resp) > (len(starting_text) + 4)
                and not resp.endswith((":", "-", "—"))):
            response_list.append(resp + '\n')

    response_end = "\n".join(response_list)
    # Drop dotted token fragments (e.g. file-name/URL-like artifacts).
    response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
    response_end = response_end.replace("<", "").replace(">", "")

    # BUG FIX: the original returned None implicitly when the result was "",
    # which renders as "None"/clears awkwardly in the Gradio textbox;
    # always return a string instead.
    return response_end
37
-
38
def infer2(starting_text):
    """Generate prompt suggestions with the text2image-prompt-generator model.

    Retries up to six times with fresh seeds until at least one completion
    survives filtering.

    Args:
        starting_text: Seed text; may be empty, in which case the model
            free-generates.

    Returns:
        Newline-separated generated prompts, or "" when all six attempts
        produced nothing usable.
    """
    response_end = ""
    for _attempt in range(6):
        # Fresh seed per attempt so retries explore different completions.
        set_seed(random.randint(100, 1000000))

        # BUG FIX: on empty input the original indexed an undefined `line`
        # list (line[random.randrange(0, len(line))]) and raised NameError.
        # Empty input is now passed straight through to the generator.

        response = gpt2_pipe2(
            starting_text,
            max_length=random.randint(60, 90),
            num_return_sequences=8,
        )

        candidates = []
        for item in response:
            resp = item['generated_text'].strip()
            # Keep only completions that genuinely extend the input and do
            # not end on a dangling separator character.
            if (resp != starting_text
                    and len(resp) > (len(starting_text) + 4)
                    and not resp.endswith((":", "-", "—"))):
                candidates.append(resp)

        response_end = "\n".join(candidates)
        # Drop dotted token fragments (e.g. file-name/URL-like artifacts).
        response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
        response_end = response_end.replace("<", "").replace(">", "")
        if response_end != "":
            return response_end

    # All attempts exhausted: return the last (empty) result.
    return response_end
63
-
64
def infer3(prompt, negative, steps, scale, seed):
    """Run a 512x512 Stable Diffusion pass on CPU.

    Args:
        prompt: Positive text prompt.
        negative: Negative prompt (what to avoid).
        steps: Number of denoising steps.
        scale: Classifier-free guidance scale.
        seed: Integer seed for reproducible generation.

    Returns:
        The list of generated PIL images from the pipeline.
    """
    # Seed a dedicated CPU generator so identical seeds reproduce results.
    rng = torch.Generator(device='cpu').manual_seed(seed)
    result = pipe(
        prompt,
        height=512,
        width=512,
        num_inference_steps=steps,
        guidance_scale=scale,
        negative_prompt=negative,
        generator=rng,
    )
    return result.images
76
-
77
# Build the Gradio UI: two prompt-generator panels plus a Stable Diffusion
# image-generation panel. Uses the legacy pre-4.x gradio `.style()` API.
block = gr.Blocks()

with block:
    # Panel 1: MagicPrompt-based prompt generator (infer1).
    with gr.Group():
        with gr.Column():
            gr.Markdown(
                """
                Model: Gustavosta/MagicPrompt-Stable-Diffusion
                """
            )
        with gr.Row() as row:
            with gr.Column():
                txt = gr.Textbox(lines=1, label="Initial Text", placeholder="English Text here")
                gpt_btn = gr.Button("Generate prompt").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
            with gr.Column():
                out = gr.Textbox(lines=4, label="Generated Prompts")

    # Panel 2: succinctly text2image prompt generator (infer2).
    with gr.Box():
        gr.Markdown(
            """
            Model: succinctly/text2image-prompt-generator
            """
        )
        with gr.Row() as row:
            with gr.Column():
                txt2 = gr.Textbox(lines=1, label="Initial Text", placeholder="English Text here")
                gpt_btn2 = gr.Button("Generate prompt").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
            with gr.Column():
                out2 = gr.Textbox(lines=4, label="Generated Prompts")

    # Panel 3: Stable Diffusion image generation (infer3).
    with gr.Box():
        gr.Markdown(
            """
            Model: stable diffusion v1.5
            """
        )
        with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
            with gr.Column():
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )

                negative = gr.Textbox(
                    label="Enter your negative prompt",
                    show_label=False,
                    placeholder="Enter a negative prompt",
                    elem_id="negative-prompt-text-input",
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),container=False,
                )

                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(columns=(1, 2), height="auto")

        # Sampling controls; "Images" is fixed at 1 (non-interactive) and
        # the seed slider is randomized on page load.
        with gr.Row(elem_id="advanced-options"):
            samples = gr.Slider(label="Images", minimum=1, maximum=1, value=1, step=1, interactive=False)
            steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=12, step=1, interactive=True)
            scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1, interactive=True)
            seed = gr.Slider(label="Random seed",minimum=0,maximum=2147483647,step=1,randomize=True,interactive=True)

    # Wire the three buttons to their inference functions.
    gpt_btn.click(infer1,inputs=txt,outputs=out)
    gpt_btn2.click(infer2,inputs=txt2,outputs=out2)
    btn.click(infer3, inputs=[text, negative, steps, scale, seed], outputs=[gallery])

# Serve the app with the REST API and request queue enabled.
block.launch(show_api=True,enable_queue=True, debug=True)
 
1
  import gradio as gr
 
2
  import torch
 
3
  from diffusers import StableDiffusionPipeline
 
 
 
4
 
5
# Load Stable Diffusion v1.5 and pin inference to the CPU (no GPU assumed).
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id).to("cpu")
9
 
10
def generate_image(prompt, negative_prompt, steps, guidance_scale, seed):
    """Run one Stable Diffusion inference pass on CPU.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Text describing what to avoid in the image.
        steps: Number of denoising steps.
        guidance_scale: Classifier-free guidance strength.
        seed: RNG seed; the Gradio slider may deliver it as a float.

    Returns:
        The first generated PIL image.
    """
    # BUG FIX: torch.Generator.manual_seed() requires an int, but a Gradio
    # slider value can arrive as a float — coerce explicitly (same for the
    # step count, which diffusers uses as an integer loop bound).
    generator = torch.Generator(device='cpu').manual_seed(int(seed))
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance_scale,
        generator=generator
    ).images[0]
    return image
20
+
21
# Gradio UI: single-panel Stable Diffusion front end wired to generate_image.
with gr.Blocks() as demo:
    gr.Markdown("# 🎨 Stable Diffusion v1.5 (CPU Inference)")

    # Prompt inputs.
    with gr.Row():
        prompt = gr.Textbox(label="Prompt", placeholder="A fantasy landscape with waterfalls")
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Blurry, low-resolution")

    # Sampling controls; the seed slider is randomized on page load.
    with gr.Row():
        steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, value=20, step=1)
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7.5, step=0.1)
        seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)

    with gr.Row():
        generate_btn = gr.Button("Generate Image")

    # The inference function returns a PIL image, hence type="pil".
    output_image = gr.Image(label="Generated Image", type="pil")

    # Wire the button to the inference function defined above.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, steps, guidance_scale, seed],
        outputs=output_image
    )

# Serve the app with the REST API exposed alongside the web UI.
demo.launch(show_api=True)