| import os |
| import numpy as np |
| from typing import Dict |
|
|
| from transformers import MaskFormerFeatureExtractor |
| |
| from io import BytesIO |
| from PIL import Image |
| import rawpy |
|
|
| import triton_python_backend_utils as pb_utils |
|
|
|
|
class TritonPythonModel:
    """Triton Python-backend model that decodes encoded image bytes and runs
    MaskFormer feature extraction, returning model-ready ``pixel_values``."""

    def initialize(self, args: Dict[str, str]) -> None:
        """
        Load the pretrained feature extractor for this model version.

        :param args: arguments from Triton config file; ``model_repository``
            and ``model_version`` are joined to locate the processor files.
        """
        path: str = os.path.join(args["model_repository"], args["model_version"])
        self.processor = MaskFormerFeatureExtractor.from_pretrained(path)

    def execute(self, requests):
        """
        Parameters
        ----------
        requests : list
            A list of pb_utils.InferenceRequest

        Returns
        -------
        list
            A list of pb_utils.InferenceResponse. The length of this list must
            be the same as `requests`
        """
        responses = []
        for request in requests:
            # IMAGE is expected to hold one encoded image byte string per
            # batch element — presumably JPEG/PNG payloads; confirm against
            # the client that populates this tensor.
            imgs = pb_utils.get_input_tensor_by_name(request, "IMAGE").as_numpy().tolist()
            imgs = self.preprocess(imgs)

            # return_tensors='np' yields numpy arrays; 'pixel_values' is the
            # only output tensor exposed to the ensemble/client.
            feature: Dict[str, np.ndarray] = self.processor(imgs, return_tensors='np')
            outputs = pb_utils.Tensor('pixel_values', feature['pixel_values'])
            responses.append(pb_utils.InferenceResponse(output_tensors=[outputs]))

        return responses

    def preprocess(self, data):
        """
        Decode a batch of encoded image byte strings into RGB PIL images.

        :param data: iterable of raw encoded image bytes
        :return: list of ``PIL.Image.Image`` objects in RGB mode
        """
        # Image.open is lazy; convert('RGB') forces the decode and normalizes
        # the mode so the feature extractor always sees 3-channel input.
        # (Debug prints removed: they logged the full raw byte payload of
        # every image on every request.)
        return [Image.open(BytesIO(raw)).convert('RGB') for raw in data]

    def finalize(self) -> None:
        """
        Finalize the model
        """
        pass