from typing import Any, Dict, List
import base64
from io import BytesIO

import torch
from torch import autocast
from diffusers import DiffusionPipeline

# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if device.type != "cuda":
    raise ValueError("need to run on GPU")


class EndpointHandler:
    def __init__(self, path: str = ""):
        # load the model and move it to the GPU
        self.pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V4.0")
        self.pipe = self.pipe.to(device)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, List[str]]:
        """
        Args:
            data (:obj:`dict`): includes the input data and the parameters for the inference.
        Return:
            A :obj:`dict` with a list of base64-encoded images under the "images" key.
        """
        inputs = data.pop("inputs", data)

        # one deterministic generator per requested image so results are reproducible
        random_generators = [
            torch.Generator().manual_seed(i)
            for i in range(inputs["num_images_per_prompt"])
        ]

        # run inference pipeline
        with autocast(device.type):
            images = self.pipe(
                prompt=inputs["prompt"],
                negative_prompt=inputs["negative_prompt"],
                num_images_per_prompt=inputs["num_images_per_prompt"],
                guidance_scale=7.5,
                generator=random_generators,
            ).images

        # encode each image as base64; use a fresh buffer per image so
        # earlier images do not leak into later encodings
        base64_images = []
        for image in images:
            buffered = BytesIO()
            image.save(buffered, format="JPEG")
            img_str = base64.b64encode(buffered.getvalue())
            base64_images.append(img_str.decode())

        # postprocess the prediction
        return {"images": base64_images}
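

# ---------------------------------------------------------------------------
# Minimal local smoke test: a sketch, assuming this file is saved as
# handler.py and a CUDA GPU is available. The payload keys mirror the ones
# read in __call__ above; decoding the first result back into a PIL image
# verifies the base64 round trip. This block is illustrative and is not part
# of the Inference Endpoints handler contract.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image

    handler = EndpointHandler()
    payload = {
        "inputs": {
            "prompt": "a photo of an astronaut riding a horse",
            "negative_prompt": "blurry, low quality",
            "num_images_per_prompt": 2,
        }
    }
    result = handler(payload)

    # decode the first image back from base64 to confirm the output is valid
    first = Image.open(BytesIO(base64.b64decode(result["images"][0])))
    print(f"decoded {len(result['images'])} image(s), first size: {first.size}")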