File size: 2,962 Bytes
0ec106b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import os
import numpy as np
from typing import Dict

from transformers import MaskFormerFeatureExtractor
# from refactor.utils.assets import ImageAsset
from io import BytesIO
from PIL import Image
import rawpy

import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    """
    Triton Python-backend model: decodes encoded image bytes from the
    "IMAGE" input and runs the MaskFormer feature extractor to produce
    a "pixel_values" tensor for the downstream segmentation model.
    """

    def initialize(self, args: Dict[str, str]) -> None:
        """
        Load the MaskFormer feature extractor from the model repository.

        :param args: arguments from the Triton config file; must contain
            "model_repository" and "model_version" keys.
        """
        path: str = os.path.join(args["model_repository"], args["model_version"])
        self.processor = MaskFormerFeatureExtractor.from_pretrained(path)

    def execute(self, requests):
        """
        Process a batch of inference requests.

        Parameters
        ----------
        requests : list
          A list of pb_utils.InferenceRequest. Each request carries an
          "IMAGE" input holding encoded image bytes (e.g. JPEG/PNG).

        Returns
        -------
        list
          A list of pb_utils.InferenceResponse. The length of this list must
          be the same as `requests`. Each response holds one "pixel_values"
          tensor produced by the feature extractor.
        """
        responses = []
        # loop batch request
        for request in requests:
            # Decode the raw "IMAGE" byte strings into PIL images.
            raw_images = pb_utils.get_input_tensor_by_name(request, "IMAGE").as_numpy().tolist()
            imgs = self.preprocess(raw_images)

            # Run feature extraction; request NumPy tensors for Triton.
            feature: Dict[str, np.ndarray] = self.processor(imgs, return_tensors='np')
            outputs = pb_utils.Tensor('pixel_values', feature['pixel_values'])
            inference_response = pb_utils.InferenceResponse(output_tensors=[outputs])
            responses.append(inference_response)

        return responses

    def preprocess(self, data):
        """
        Decode each encoded image byte string into an RGB PIL image.

        :param data: iterable of encoded image byte strings.
        :return: list of PIL.Image.Image objects in RGB mode.
        :raises ValueError: if an element cannot be decoded as an image.
        """
        imgs = []
        for element in data:
            try:
                image = Image.open(BytesIO(element)).convert('RGB')
            except Exception as exc:
                # Fail loudly with context instead of propagating a
                # cryptic decoder error from deep inside PIL.
                raise ValueError(
                    f"Cannot decode image asset ({len(element)} bytes)"
                ) from exc
            imgs.append(image)
        return imgs

    def finalize(self) -> None:
        """
        Finalize the model. No resources need explicit cleanup.
        """
        pass