from typing import Any, Dict
import base64
from io import BytesIO

import torch
from diffusers import StableDiffusionPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if device.type != "cuda":
    raise RuntimeError("This handler requires a CUDA-capable GPU.")


class EndpointHandler:
    def __init__(self, path: str = ""):
        print("Loading model from:", path)
        self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        self.pipe = self.pipe.to(device)
        # Disable the safety checker while preserving its expected
        # (images, nsfw_flags) return shape.
        self.pipe.safety_checker = lambda images, clip_input: (images, [False] * len(images))

    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                Includes the input prompt and the parameters for the inference.
        Return:
            A :obj:`dict` containing the base64-encoded image.
        """
        inputs = data.pop("inputs", data)

        print("Running inference with parameters:", data)
        print("Running inference with inputs:", inputs)

        out = self.pipe(
            inputs,
            guidance_scale=5,
            num_images_per_prompt=1,
        )

        # Serialize the generated PIL image to base64 so it can be returned as JSON.
        buffer = BytesIO()
        out.images[0].save(buffer, format="PNG")
        return {"image": base64.b64encode(buffer.getvalue()).decode("utf-8")}
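

# Minimal local smoke test (a hedged sketch, not part of the original handler):
# it assumes a Stable Diffusion checkpoint saved at "./model", a CUDA GPU, and
# the {"inputs": <prompt>} payload shape that Hugging Face Inference Endpoints
# sends to custom handlers. The path and prompt below are placeholders.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")
    result = handler({"inputs": "a photograph of an astronaut riding a horse"})
    print("base64 image (first 48 chars):", result["image"][:48])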