HAL1993 committed on
Commit
e6e9d2c
·
verified ·
1 Parent(s): bdb7170

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +170 -21
app.py CHANGED
@@ -4,8 +4,9 @@ import torch
4
  import os
5
  from compel import Compel, ReturnedEmbeddingsType
6
  from diffusers import DiffusionPipeline
 
7
 
8
-
9
  model_name = os.environ.get('MODEL_NAME', 'UnfilteredAI/NSFW-gen-v2')
10
  pipe = DiffusionPipeline.from_pretrained(
11
  model_name,
@@ -14,18 +15,54 @@ pipe = DiffusionPipeline.from_pretrained(
14
  pipe.to('cuda')
15
 
16
  compel = Compel(
17
- tokenizer=[pipe.tokenizer, pipe.tokenizer_2] ,
18
- text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
19
- returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
20
- requires_pooled=[False, True]
21
  )
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  @spaces.GPU(duration=120)
25
- def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, width, height, num_samples):
26
- embeds, pooled = compel(prompt)
 
 
 
 
27
  neg_embeds, neg_pooled = compel(negative_prompt)
28
- return pipe(
 
 
29
  prompt_embeds=embeds,
30
  pooled_prompt_embeds=pooled,
31
  negative_prompt_embeds=neg_embeds,
@@ -36,18 +73,130 @@ def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, width
36
  height=height,
37
  num_images_per_prompt=num_samples
38
  ).images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
 
40
 
41
- gr.Interface(
42
- fn=generate,
43
- inputs=[
44
- gr.Text(label="Prompt"),
45
- gr.Text("(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn, (deformed | distorted | disfigured:1.3), bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers:1.4, disconnected limbs, blurry, amputation.", label="Negative Prompt"),
46
- gr.Number(60, label="Number inference steps"),
47
- gr.Number(7, label="Guidance scale"),
48
- gr.Number(1024, label="Width"),
49
- gr.Number(1024, label="Height"),
50
- gr.Number(7, label="# images"),
51
- ],
52
- outputs=gr.Gallery(),
53
- ).launch()
 
4
  import os
5
  from compel import Compel, ReturnedEmbeddingsType
6
  from diffusers import DiffusionPipeline
7
+ import requests
8
 
9
+ # Model setup
10
  model_name = os.environ.get('MODEL_NAME', 'UnfilteredAI/NSFW-gen-v2')
11
  pipe = DiffusionPipeline.from_pretrained(
12
  model_name,
 
15
  pipe.to('cuda')
16
 
17
# Prompt-weighting helper bound to the pipeline's two tokenizer/encoder pairs
# (tokenizer_2/text_encoder_2 suggest an SDXL-style pipeline — TODO confirm).
# requires_pooled=[False, True]: only the second encoder contributes the
# pooled embedding that the pipeline consumes alongside the token embeddings.
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True]
)
23
 
24
# Translation function
@spaces.GPU
def translate_albanian_to_english(text):
    """Translate Albanian (*sq*) text to English via the external HTTP service.

    Returns "" for blank input. Makes up to two attempts; when the second
    attempt also fails, raises gr.Error with an Albanian-language message.
    """
    if not text.strip():
        return ""
    endpoint = "https://hal1993-mdftranslation1234567890abcdef1234567890-fc073a6.hf.space/v1/translate"
    payload = {"from_language": "sq", "to_language": "en", "input_text": text}
    for attempt in range(2):
        try:
            resp = requests.post(
                endpoint,
                json=payload,
                headers={"accept": "application/json", "Content-Type": "application/json"},
                timeout=5,
            )
            resp.raise_for_status()
            # Service responds with {"translate": "<english text>"}.
            return resp.json().get("translate", "")
        except Exception as exc:
            # First failure: silently retry. Second failure: surface the error.
            if attempt == 1:
                raise gr.Error(f"Përkthimi dështoi: {str(exc)}")
    # Defensive fallback; the loop above always returns or raises.
    raise gr.Error("Përkthimi dështoi. Ju lutem provoni përsëri.")
44
+
45
# Aspect ratio function
def update_aspect_ratio(ratio):
    """Map an aspect-ratio label to a (width, height) pair in pixels.

    Unknown labels fall back to the 1:1 square, 1024x1024.
    """
    dimensions = {
        "1:1": (1024, 1024),
        "9:16": (576, 1024),   # portrait: 1024 * 9/16 = 576 wide
        "16:9": (1024, 576),   # landscape: 1024 * 9/16 = 576 tall
    }
    return dimensions.get(ratio, (1024, 1024))
54
 
55
  @spaces.GPU(duration=120)
56
+ def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, width, height, num_samples, progress=gr.Progress(track_tqdm=True)):
57
+ # Translate Albanian prompt to English
58
+ final_prompt = translate_albanian_to_english(prompt.strip()) if prompt.strip() else ""
59
+
60
+ # Use Compel for prompt embeddings
61
+ embeds, pooled = compel(final_prompt)
62
  neg_embeds, neg_pooled = compel(negative_prompt)
63
+
64
+ # Run pipeline
65
+ images = pipe(
66
  prompt_embeds=embeds,
67
  pooled_prompt_embeds=pooled,
68
  negative_prompt_embeds=neg_embeds,
 
73
  height=height,
74
  num_images_per_prompt=num_samples
75
  ).images
76
+
77
+ # Return single image
78
+ return images[0]
79
+
80
# Gradio interface
def create_demo():
    """Build and return the Gradio Blocks UI (Albanian-language labels).

    Visible controls: prompt textbox, aspect-ratio radio, generate button,
    result image. Generation parameters (negative prompt, steps, guidance,
    width/height, sample count) are hidden components wired into `generate`.

    Returns:
        gr.Blocks: the assembled, un-launched demo.
    """
    with gr.Blocks() as demo:
        # CSS for compact layout, 320px gap, and download button scaling
        gr.HTML("""
        <style>
        body::before {
            content: "";
            display: block;
            height: 320px;
            background-color: var(--body-background-fill);
        }
        button[aria-label="Fullscreen"], button[aria-label="Fullscreen"]:hover {
            display: none !important;
            visibility: hidden !important;
            opacity: 0 !important;
            pointer-events: none !important;
        }
        button[aria-label="Share"], button[aria-label="Share"]:hover {
            display: none !important;
        }
        button[aria-label="Download"] {
            transform: scale(3);
            transform-origin: top right;
            margin: 0 !important;
            padding: 6px !important;
        }
        .constrained-container {
            max-width: 600px;
            margin: 0 auto;
        }
        </style>
        """)

        gr.Markdown("# Gjenero Imazhe")
        gr.Markdown("Krijo një imazh të ri teper real bazuar në përshkrimin tënd")

        with gr.Row():
            with gr.Column(elem_classes="constrained-container"):
                prompt = gr.Textbox(
                    label="Përshkrimi",
                    placeholder="Shkruani përshkrimin këtu"
                )
                aspect_ratio = gr.Radio(
                    choices=["9:16", "1:1", "16:9"],
                    value="1:1",
                    label="Raporti i Imazhit"
                )
                generate_button = gr.Button(value="Gjenero")
                result_image = gr.Image(
                    label="Rezultati",
                    type="pil",
                    height=480,
                    width=480,
                    interactive=False,
                    elem_classes="constrained-container"
                )
                # Hidden components for processing
                # BUG FIX: the original value= string literal was unterminated
                # (missing closing quote), a SyntaxError for the whole module.
                negative_prompt = gr.Textbox(
                    value="(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn, (deformed | distorted | disfigured:1.3), bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers:1.4, disconnected limbs, blurry, amputation.",
                    visible=False
                )
                num_inference_steps = gr.Slider(
                    value=60,
                    minimum=1,
                    maximum=100,
                    step=1,
                    visible=False
                )
                guidance_scale = gr.Slider(
                    value=7,
                    minimum=1,
                    maximum=20,
                    step=0.1,
                    visible=False
                )
                width_slider = gr.Slider(
                    value=1024,
                    minimum=256,
                    maximum=1536,
                    step=8,
                    visible=False
                )
                height_slider = gr.Slider(
                    value=1024,
                    minimum=256,
                    maximum=1536,
                    step=8,
                    visible=False
                )
                num_samples = gr.Slider(
                    value=1,
                    minimum=1,
                    maximum=1,
                    step=1,
                    visible=False
                )

                # Update hidden sliders based on aspect ratio
                aspect_ratio.change(
                    fn=update_aspect_ratio,
                    inputs=[aspect_ratio],
                    outputs=[width_slider, height_slider],
                    queue=False
                )

                # Bind the generate button
                inputs = [
                    prompt, negative_prompt, num_inference_steps, guidance_scale,
                    width_slider, height_slider, num_samples
                ]
                generate_button.click(
                    fn=generate,
                    inputs=inputs,
                    outputs=[result_image]
                )

    return demo
198
 
199
if __name__ == "__main__":
    # Entry point: report the Gradio version, then serve the UI on all interfaces.
    print(f"Gradio version: {gr.__version__}")
    demo_app = create_demo()
    demo_app.queue(max_size=12).launch(server_name='0.0.0.0')