# Hugging Face Space status banner captured by page extraction — not source code:
# Spaces:
# Running
# Running
import base64
from io import BytesIO

import torch

from utils import pipes, tiling, wallpaper, images
# Default generation parameters. Each value can be overridden per request
# by sending a key of the same name (minus the "default_" prefix) in the
# request payload handled by EndpointHandler.__call__.

# Width in pixels of the final wallpaper tile returned to the caller.
default_desired_output_width = 1024

# text2img stage defaults.
default_text2img_inference_steps = 50
default_text2img_guidance_scale = 7.14

# img2img stage defaults.
default_img2img_inference_steps = 50
default_img2img_strength = 0.6
class EndpointHandler:
    """Inference Endpoint handler: generates a seamless half-drop wallpaper
    tile from a text prompt using Stable Diffusion XL.

    One pipeline is pinned per GPU (text2img on ``cuda:0``, img2img on
    ``cuda:1``) so both models fit in memory simultaneously.

    NOTE(review): ``self.sdxl_img2img_pipe`` and the ``img2img_*`` request
    parameters are created/parsed but never used in ``__call__`` — confirm
    whether the img2img refinement step was removed intentionally.
    """

    def __init__(self, path: str = "") -> None:
        # `path` is part of the Inference Endpoints handler contract; the
        # pipelines here are built by project helpers rather than loaded
        # from `path`.
        self.sdxl_pipe = pipes.create_stable_diffusion_xl_pipeline("cuda:0", enable_tiling=False)
        self.sdxl_img2img_pipe = pipes.create_stable_diffusion_xl_img2img_pipe("cuda:1")

    def __call__(self, data: dict) -> dict:
        """Handle one generation request.

        Args:
            data: request payload. ``"inputs"`` holds the prompt; the optional
                keys ``desired_output_width``, ``text2img_inference_steps``,
                ``img2img_inference_steps``, ``text2img_guidance_scale`` and
                ``img2img_strength`` override the module-level defaults.

        Returns:
            ``{"image": <base64-encoded PNG of the half-drop tile>}``.
        """
        # Re-pin the pipelines each call in case the serving framework moved them.
        self.sdxl_pipe.to("cuda:0")
        self.sdxl_img2img_pipe.to("cuda:1")
        # HF convention: fall back to the raw payload when "inputs" is absent.
        prompt = data.pop("inputs", data)
        desired_output_width = int(data.get("desired_output_width", default_desired_output_width))
        text2img_inference_steps = int(data.get("text2img_inference_steps", default_text2img_inference_steps))
        img2img_inference_steps = int(data.get("img2img_inference_steps", default_img2img_inference_steps))
        text2img_guidance_scale = float(data.get("text2img_guidance_scale", default_text2img_guidance_scale))
        img2img_strength = float(data.get("img2img_strength", default_img2img_strength))
        print(
            f"Prompt: {prompt}, Desired Output Width: {desired_output_width}, Text2Img Inference Steps: {text2img_inference_steps}, Img2Img Inference Steps: {img2img_inference_steps}, Text2Img Guidance Scale: {text2img_guidance_scale}, Img2Img Strength: {img2img_strength}")
        # The square tile is generated larger than the requested output so the
        # later rotation/crop still yields `desired_output_width` pixels.
        original_image_size = tiling.compute_input_tile_width_for_desired_output(desired_output_width)
        original_image = self.sdxl_pipe(
            prompt=prompt,
            num_inference_steps=text2img_inference_steps,
            width=original_image_size,
            height=original_image_size,
            guidance_scale=text2img_guidance_scale,
        ).images[0]
        tiled_canvas = wallpaper.prepare_for_diamond_drop(original_image)
        inner_rotated_tile = images.extract_larger_inner_rotated_tile_from_image(
            tiled_canvas, original_image_size, original_image_size)
        half_drop = wallpaper.convert_tile_to_half_drop(inner_rotated_tile)
        half_drop_buffer = BytesIO()
        half_drop.save(half_drop_buffer, format="PNG")
        half_drop_string = base64.b64encode(half_drop_buffer.getvalue())
        # Bug fix: torch.cuda.empty_cache() frees only the *current* device's
        # caching-allocator memory, but this handler allocates on both cuda:0
        # and cuda:1 — clear every visible device explicitly.
        for device_index in range(torch.cuda.device_count()):
            with torch.cuda.device(device_index):
                torch.cuda.empty_cache()
        return {"image": half_drop_string.decode()}