K00B404 committed on
Commit
504d46f
·
verified ·
1 Parent(s): 6535cc7

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +166 -0
app.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from diffusers import StableDiffusionPipeline
3
+ import torch
4
+ from PIL import Image
5
+ import re
6
+ import uuid
7
+ import gc
8
+
9
# Minimal denylist used for prompt safety screening (extend as needed);
# kept as an inline list instead of loading an external word-list dataset.
BLOCKED_WORDS = ["nsfw", "nude", "explicit"]  # Add more as needed
11
+
12
# Initialize the pipeline with CPU optimization
def initialize_pipeline():
    """Load Stable Diffusion 2.1-base and prepare it for CPU inference.

    Returns:
        StableDiffusionPipeline: the pipeline moved to CPU with attention
        slicing enabled to reduce peak memory usage.
    """
    # float32 is required on CPU — half precision is a GPU-only optimization.
    pipeline = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1-base",
        torch_dtype=torch.float32,
    )
    pipeline = pipeline.to("cpu")
    # Attention slicing trades a little speed for a smaller memory footprint.
    pipeline.enable_attention_slicing()
    return pipeline

# Module-level singleton: loaded once at import so every request reuses it.
pipe = initialize_pipeline()
24
+
25
def cleanup():
    """Release unused memory after a generation pass.

    Runs Python garbage collection and, if a CUDA device happens to be
    available, also clears PyTorch's CUDA allocator cache.
    """
    gc.collect()
    cuda_present = torch.cuda.is_available()
    if cuda_present:
        torch.cuda.empty_cache()
30
+
31
def infer(prompt, negative, scale):
    """Generate one image for *prompt* and return its saved path in a list.

    Args:
        prompt: Text prompt describing the desired image.
        negative: Negative prompt steering the model away from content.
        scale: Classifier-free guidance scale.

    Returns:
        list[str]: Single-element list with the saved JPEG path (for gr.Gallery).

    Raises:
        gr.Error: If the prompt contains a blocked word or generation fails.
    """
    # Safety check: compare against a lowercase COPY so the user's original
    # casing still reaches the model (the old code lowercased `prompt` in
    # place). Word boundaries avoid false hits inside benign words.
    lowered = prompt.lower()
    for word in BLOCKED_WORDS:
        if re.search(rf"\b{re.escape(word)}\b", lowered):
            raise gr.Error("Unsafe content found. Please try again with different prompts.")

    try:
        # Generate only one image at a time to conserve memory
        images = pipe(
            prompt=prompt,
            negative_prompt=negative,
            guidance_scale=scale,
            num_inference_steps=30,  # Reduced steps for faster generation
            num_images_per_prompt=1,
        ).images

        # Save under a unique name so concurrent requests never collide.
        output_path = f"{uuid.uuid4()}.jpg"
        images[0].save(output_path)

        # Cleanup to free memory
        cleanup()

        return [output_path]

    except Exception as e:
        cleanup()
        # Chain the cause so the underlying traceback stays diagnosable.
        raise gr.Error(f"Generation failed: {str(e)}") from e
60
+
61
# Custom styling for the Gradio app: centered narrow layout, black buttons,
# themed range sliders, and a minimum gallery height.
css = """
.gradio-container {
    max-width: 768px !important;
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: black;
    background: black;
}
input[type='range'] {
    accent-color: black;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
}
#gallery>div>.h-full {
    min-height: 20rem;
}
"""

# Clickable example rows: [prompt, negative prompt, guidance scale].
examples = [
    ["A small cabin on top of a snowy mountain, artstation style", "low quality, ugly", 9],
    ["A red apple on a wooden table, still life", "low quality", 9],
]
98
+
99
# Build the UI: header, prompt inputs, generate button, gallery, advanced
# settings, cached examples, and a CPU notice — then launch with queuing.
with gr.Blocks(css=css) as block:
    gr.HTML(
        """
        <div style="text-align: center; margin: 0 auto;">
            <h1 style="font-weight: 900; margin-bottom: 7px;">
                Stable Diffusion 2.1 (CPU Version)
            </h1>
            <p style="margin-bottom: 10px; font-size: 94%;">
                Optimized for CPU usage with 16GB RAM
            </p>
        </div>
        """
    )

    with gr.Group():
        with gr.Row():
            with gr.Column(scale=3):
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                )
                negative = gr.Textbox(
                    label="Enter your negative prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                )
            with gr.Column(scale=1, min_width=150):
                btn = gr.Button("Generate image")

    gallery = gr.Gallery(
        label="Generated images",
        show_label=False,
        elem_id="gallery",
    )

    with gr.Accordion("Advanced settings", open=False):
        guidance_scale = gr.Slider(
            label="Guidance Scale",
            minimum=1,
            maximum=20,
            value=9,
            step=0.1,
        )

    # NOTE(review): cache_examples=True runs infer for every example at
    # startup, which is slow on CPU — confirm this cost is intended.
    gr.Examples(
        examples=examples,
        fn=infer,
        inputs=[text, negative, guidance_scale],
        outputs=[gallery],
        cache_examples=True,
    )

    # All three triggers (Enter in either textbox, button click) share the
    # same handler and wiring, so register them in one pass.
    inputs = [text, negative, guidance_scale]
    for trigger in (text.submit, negative.submit, btn.click):
        trigger(infer, inputs=inputs, outputs=[gallery])

    gr.HTML(
        """
        <div style="text-align: center; margin-top: 20px;">
            <p>Running on CPU - Please allow longer generation times</p>
        </div>
        """
    )

block.queue().launch(show_error=True)