enesbol commited on
Commit
74c1654
·
1 Parent(s): b4b6e32

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +85 -85
handler.py CHANGED
@@ -13,6 +13,89 @@ class EndpointHandler():
13
  self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
14
  self.pipe.to("cuda")
15
  self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
  def build_prompt(self, text_prompt, color_code):
18
  color_name = self.hex_to_name(color_code)
@@ -48,6 +131,7 @@ class EndpointHandler():
48
  return mapped_color
49
 
50
  # Helpers.
 
51
  def draw_text(img, text, font=cv2.FONT_HERSHEY_PLAIN, pos=(20, 45), font_scale=1, font_thickness=1, text_color=(0, 0, 255)):
52
  x, y = pos
53
  text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)
@@ -247,21 +331,18 @@ class EndpointHandler():
247
  def create_image_template(
248
  base_image_path,
249
  logo_path,
250
-
251
  punchline_text,
252
  punchline_text_color="#008000",
253
  punchline_text_max_width=550,
254
  spacing_image_text=0,
255
-
256
  button_color="#008000",
257
  button_text="Call Action Text Here! >",
258
  button_font=cv2.FONT_HERSHEY_TRIPLEX,
259
  button_font_scale=0.7,
260
  button_font_thickness=1,
261
  button_text_color="#FFFFFF",
262
-
263
  spacing_between_punchline_and_button=10,
264
- corner_radius=30,):
265
 
266
 
267
 
@@ -303,88 +384,7 @@ class EndpointHandler():
303
  return result_template
304
 
305
 
306
- def __call__(self, data):
307
-
308
- info=data['inputs']
309
-
310
- # Image + Logo
311
- image=info.pop("image",data)
312
- logo = info.pop("logo",data)
313
- logo = base64.b64decode(logo)
314
- logo = Image.open(BytesIO(logo)).convert('RGB')
315
-
316
- # Seed
317
- seed = info.pop("seed", data)
318
-
319
- # Punchline Text
320
- punchline_text_max_width=info.pop("punchline_text_max_width", data.get("punchline_text_max_width", 550))
321
- punchline_text_color=info.pop("punchline_text_color", data.get("punchline_text_color", "#008000"))
322
- spacing_image_text=info.pop("spacing_image_text", data.get("spacing_image_text", 0))
323
-
324
-
325
- # color code:
326
- color_code=info.pop("color_code",data)
327
- # inference steps:
328
- num_inference_steps = info.pop("num_inference_steps", data.get("num_inference_steps", 40))
329
- # Image guidance scale
330
- image_guidance_scale = info.pop("image_guidance_scale", data.get("image_guidance_scale", 1.5))
331
- # Guidance scale
332
- guidance_scale = info.pop("guidance_scale", data.get("guidance_scale", 7.5))
333
-
334
-
335
- # Button color
336
- button_color = info.pop("button_color", data.get("button_color", "#008000"))
337
- # Button text
338
- button_text = info.pop("button_text", data.get("button_text"))
339
- # Button font
340
- button_font = info.pop("button_font", data.get("button_font", cv2.FONT_HERSHEY_TRIPLEX))
341
- # Button font scale
342
- button_font_scale = info.pop("button_font_scale", data.get("button_font_scale", 0.75))
343
- button_font_thickness = info.pop("button_font_thickness", data.get("button_font_thickness", 1))
344
- button_text_color = info.pop("button_text_color", data.get("button_text_color", "#FFFFFF"))
345
- spacing_between_punchline_and_button = info.pop("spacing_between_punchline_and_button", data.get("spacing_between_punchline_and_button", 10))
346
-
347
- # prompt
348
- text_prompt=info.pop("prompt",data)
349
 
350
-
351
- # image
352
- image=base64.b64decode(image)
353
- raw_images = Image.open(BytesIO(image)).convert('RGB')
354
- raw_images = raw_images.convert("RGB")
355
- raw_images = raw_images.resize((512, 512))
356
-
357
- result_prompt, negative_prompt = self.build_prompt(text_prompt, color_code)
358
-
359
- torch.manual_seed(seed)
360
- images = self.pipe(result_prompt, negative_prompt = negative_prompt, image=raw_images, num_inference_steps=num_inference_steps, guidance_scale = guidance_scale, image_guidance_scale = image_guidance_scale).images
361
- img=images[0]
362
- img.save("./1.png")
363
- logo.save("./logo.png")
364
-
365
- resulting_template = create_image_template(
366
- base_image_path="./1.png",
367
- logo_path="./logo.png",
368
- punchline_text=punchline_text,
369
- punchline_text_color=punchline_text_color,
370
- punchline_text_max_width=punchline_text_max_width,
371
- spacing_image_text=spacing_image_text,
372
- button_color=button_color,
373
- button_text=button_text,
374
- button_font=button_font,
375
- button_font_scale=button_font_scale,
376
- button_font_thickness=button_font_thickness,
377
- button_text_color=button_text_color,
378
- spacing_between_punchline_and_button=spacing_between_punchline_and_button,
379
- corner_radius=30)
380
-
381
-
382
-
383
- resulting_template.save("./result.png")
384
-
385
- with open('./result.png','rb') as img_file:
386
- encoded_string = base64.b64encode(img_file.read()).decode('utf-8')
387
- return {'image':encoded_string}
388
 
389
 
390
 
 
13
  self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
14
  self.pipe.to("cuda")
15
  self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
16
+
17
def __call__(self, data):
    """Handle one inference request: edit the input image with InstructPix2Pix,
    then compose it into an ad template with logo, punchline text and a button.

    Parameters
    ----------
    data : dict
        Request payload. Required keys under ``data['inputs']``:
        ``image`` (base64 str), ``logo`` (base64 str), ``prompt`` (str),
        ``color_code`` (hex str), ``seed`` (int). All other keys are optional
        styling parameters with defaults below.

    Returns
    -------
    dict
        ``{'image': <base64-encoded PNG of the final template>}``

    Raises
    ------
    KeyError
        If a required input key is missing (fail fast instead of crashing
        later with a confusing TypeError — the old code defaulted missing
        keys to the whole ``data`` dict).
    """
    info = data['inputs']

    # --- Required inputs (raise KeyError immediately if absent) ---
    image = info.pop("image")
    logo_b64 = info.pop("logo")
    text_prompt = info.pop("prompt")
    color_code = info.pop("color_code")
    seed = info.pop("seed")

    # Decode the logo once, straight to RGB.
    logo = Image.open(BytesIO(base64.b64decode(logo_b64))).convert('RGB')

    # --- Punchline text ---
    # BUG FIX: `punchline_text` was never read from the payload, so the
    # create_image_template(...) call below raised NameError on every request.
    punchline_text = info.pop("punchline_text", data.get("punchline_text", ""))
    punchline_text_max_width = info.pop("punchline_text_max_width", data.get("punchline_text_max_width", 550))
    punchline_text_color = info.pop("punchline_text_color", data.get("punchline_text_color", "#008000"))
    spacing_image_text = info.pop("spacing_image_text", data.get("spacing_image_text", 0))

    # --- Diffusion parameters ---
    num_inference_steps = info.pop("num_inference_steps", data.get("num_inference_steps", 40))
    image_guidance_scale = info.pop("image_guidance_scale", data.get("image_guidance_scale", 1.5))
    guidance_scale = info.pop("guidance_scale", data.get("guidance_scale", 7.5))

    # --- Button styling ---
    button_color = info.pop("button_color", data.get("button_color", "#008000"))
    # BUG FIX: previously fell back to data.get("button_text") -> None, which
    # silently overrode create_image_template's default button caption.
    button_text = info.pop("button_text", data.get("button_text", "Call Action Text Here! >"))
    button_font = info.pop("button_font", data.get("button_font", cv2.FONT_HERSHEY_TRIPLEX))
    button_font_scale = info.pop("button_font_scale", data.get("button_font_scale", 0.75))
    button_font_thickness = info.pop("button_font_thickness", data.get("button_font_thickness", 1))
    button_text_color = info.pop("button_text_color", data.get("button_text_color", "#FFFFFF"))
    spacing_between_punchline_and_button = info.pop(
        "spacing_between_punchline_and_button",
        data.get("spacing_between_punchline_and_button", 10),
    )

    # Decode the base image and normalize to the pipeline's 512x512 RGB input.
    raw_image = Image.open(BytesIO(base64.b64decode(image))).convert('RGB')
    raw_image = raw_image.resize((512, 512))

    result_prompt, negative_prompt = self.build_prompt(text_prompt, color_code)

    # Seed globally for reproducible generations.
    torch.manual_seed(seed)
    images = self.pipe(
        result_prompt,
        negative_prompt=negative_prompt,
        image=raw_image,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        image_guidance_scale=image_guidance_scale,
    ).images
    img = images[0]

    # The template composer reads its inputs from disk, so persist both
    # the generated image and the logo first.
    img.save("./1.png")
    logo.save("./logo.png")

    # NOTE(review): create_image_template is declared without a `self`
    # parameter elsewhere in this class — confirm the bound-method call
    # here binds arguments as intended.
    resulting_template = self.create_image_template(
        base_image_path="./1.png",
        logo_path="./logo.png",
        punchline_text=punchline_text,
        punchline_text_color=punchline_text_color,
        punchline_text_max_width=punchline_text_max_width,
        spacing_image_text=spacing_image_text,
        button_color=button_color,
        button_text=button_text,
        button_font=button_font,
        button_font_scale=button_font_scale,
        button_font_thickness=button_font_thickness,
        button_text_color=button_text_color,
        spacing_between_punchline_and_button=spacing_between_punchline_and_button,
        corner_radius=30,
    )

    resulting_template.save("./result.png")

    # Return the final composite as base64 so the endpoint response is JSON-safe.
    with open('./result.png', 'rb') as img_file:
        encoded_string = base64.b64encode(img_file.read()).decode('utf-8')
    return {'image': encoded_string}
99
 
100
  def build_prompt(self, text_prompt, color_code):
101
  color_name = self.hex_to_name(color_code)
 
131
  return mapped_color
132
 
133
  # Helpers.
134
+
135
  def draw_text(img, text, font=cv2.FONT_HERSHEY_PLAIN, pos=(20, 45), font_scale=1, font_thickness=1, text_color=(0, 0, 255)):
136
  x, y = pos
137
  text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)
 
331
  def create_image_template(
332
  base_image_path,
333
  logo_path,
 
334
  punchline_text,
335
  punchline_text_color="#008000",
336
  punchline_text_max_width=550,
337
  spacing_image_text=0,
 
338
  button_color="#008000",
339
  button_text="Call Action Text Here! >",
340
  button_font=cv2.FONT_HERSHEY_TRIPLEX,
341
  button_font_scale=0.7,
342
  button_font_thickness=1,
343
  button_text_color="#FFFFFF",
 
344
  spacing_between_punchline_and_button=10,
345
+ corner_radius=30):
346
 
347
 
348
 
 
384
  return result_template
385
 
386
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
387
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
388
 
389
 
390