saliacoel committed
Commit 67b99e9 · verified · 1 Parent(s): 5180b76

Upload tensorrt_convert.py

Files changed (1):
  1. tensorrt_convert.py +650 -0

tensorrt_convert.py ADDED
import torch
import os
import time
import comfy.model_management
import comfy.model_base

import tensorrt as trt
import folder_paths
from tqdm import tqdm

# TODO:
# Make it more generic: less model-specific code

# Add the output directory to the tensorrt search path
if "tensorrt" in folder_paths.folder_names_and_paths:
    folder_paths.folder_names_and_paths["tensorrt"][0].append(
        os.path.join(folder_paths.get_output_directory(), "tensorrt")
    )
    folder_paths.folder_names_and_paths["tensorrt"][1].add(".engine")
else:
    folder_paths.folder_names_and_paths["tensorrt"] = (
        [os.path.join(folder_paths.get_output_directory(), "tensorrt")],
        {".engine"},
    )
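# Engines saved under output/tensorrt/ become discoverable through ComfyUI's
# folder registry. A minimal loader-side sketch (illustrative only, not part
# of this node; get_filename_list/get_full_path are standard folder_paths
# helpers):
#
#   import folder_paths
#   names = folder_paths.get_filename_list("tensorrt")
#   path = folder_paths.get_full_path("tensorrt", names[0])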


class TQDMProgressMonitor(trt.IProgressMonitor):
    def __init__(self):
        trt.IProgressMonitor.__init__(self)
        self._active_phases = {}
        self._step_result = True
        self.max_indent = 5

    def phase_start(self, phase_name, parent_phase, num_steps):
        leave = False
        try:
            if parent_phase is not None:
                nbIndents = (
                    self._active_phases.get(parent_phase, {}).get(
                        "nbIndents", self.max_indent
                    )
                    + 1
                )
                if nbIndents >= self.max_indent:
                    return
            else:
                nbIndents = 0
                leave = True
            self._active_phases[phase_name] = {
                "tq": tqdm(
                    total=num_steps, desc=phase_name, leave=leave, position=nbIndents
                ),
                "nbIndents": nbIndents,
                "parent_phase": parent_phase,
            }
        except KeyboardInterrupt:
            # The phase_start callback cannot directly cancel the build, so request
            # the cancellation from within step_complete.
            self._step_result = False

    def phase_finish(self, phase_name):
        try:
            if phase_name in self._active_phases:
                # Fill the bar to 100% before removing the phase.
                self._active_phases[phase_name]["tq"].update(
                    self._active_phases[phase_name]["tq"].total
                    - self._active_phases[phase_name]["tq"].n
                )
                # Refresh every ancestor bar so nested bars render correctly.
                parent_phase = self._active_phases[phase_name].get("parent_phase", None)
                while parent_phase in self._active_phases:
                    self._active_phases[parent_phase]["tq"].refresh()
                    parent_phase = self._active_phases[parent_phase].get(
                        "parent_phase", None
                    )
                del self._active_phases[phase_name]
        except KeyboardInterrupt:
            self._step_result = False

    def step_complete(self, phase_name, step):
        try:
            if phase_name in self._active_phases:
                self._active_phases[phase_name]["tq"].update(
                    step - self._active_phases[phase_name]["tq"].n
                )
            return self._step_result
        except KeyboardInterrupt:
            # There is no need to propagate this exception to TensorRT; returning
            # False simply cancels the build.
            return False
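# TensorRT drives this monitor itself: it invokes phase_start/step_complete/
# phase_finish during builder.build_serialized_network(), and a False return
# from step_complete cancels the build. The monitor is attached below via
# config.progress_monitor = TQDMProgressMonitor().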


class TRT_MODEL_CONVERSION_BASE:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.temp_dir = folder_paths.get_temp_directory()
        self.timing_cache_path = os.path.normpath(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "timing_cache.trt")
        )

    RETURN_TYPES = ()
    FUNCTION = "convert"
    OUTPUT_NODE = True
    CATEGORY = "TensorRT"

    @classmethod
    def INPUT_TYPES(s):
        raise NotImplementedError

    # Sets up the builder to use the timing cache file, creating it if it does not already exist
    def _setup_timing_cache(self, config: trt.IBuilderConfig):
        buffer = b""
        if os.path.exists(self.timing_cache_path):
            with open(self.timing_cache_path, mode="rb") as timing_cache_file:
                buffer = timing_cache_file.read()
            print("Read {} bytes from timing cache.".format(len(buffer)))
        else:
            print("No timing cache found; initializing a new one.")
        timing_cache: trt.ITimingCache = config.create_timing_cache(buffer)
        config.set_timing_cache(timing_cache, ignore_mismatch=True)

    # Saves the config's timing cache to file
    def _save_timing_cache(self, config: trt.IBuilderConfig):
        timing_cache: trt.ITimingCache = config.get_timing_cache()
        with open(self.timing_cache_path, "wb") as timing_cache_file:
            timing_cache_file.write(memoryview(timing_cache.serialize()))
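    # The timing cache records layer-tactic timings from previous builds, so
    # converting further models with similar shapes gets noticeably faster
    # after the first run. ignore_mismatch=True above allows loading a cache
    # created on a different device instead of failing outright.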

    def _convert(
        self,
        model,
        filename_prefix,
        batch_size_min,
        batch_size_opt,
        batch_size_max,
        height_min,
        height_opt,
        height_max,
        width_min,
        width_opt,
        width_max,
        context_min,
        context_opt,
        context_max,
        num_video_frames,
        is_static: bool,
    ):
        output_onnx = os.path.normpath(
            os.path.join(self.temp_dir, "{}".format(time.time()), "model.onnx")
        )

        comfy.model_management.unload_all_models()
        comfy.model_management.load_models_gpu(
            [model], force_patch_weights=True, force_full_load=True
        )
        unet = model.model.diffusion_model

        context_dim = model.model.model_config.unet_config.get("context_dim", None)
        context_len = 77
        context_len_min = context_len
        y_dim = model.model.adm_channels
        extra_input = {}
        dtype = torch.float16

        if isinstance(model.model, comfy.model_base.SD3):  # SD3
            context_embedder_config = model.model.model_config.unet_config.get(
                "context_embedder_config", None
            )
            if context_embedder_config is not None:
                context_dim = context_embedder_config.get("params", {}).get(
                    "in_features", None
                )
                # NOTE: SD3 can have 77 or 154 tokens depending on which text
                # encoders are used, which is why context_len_min stays 77.
                context_len = 154
        elif isinstance(model.model, comfy.model_base.AuraFlow):
            context_dim = 2048
            context_len_min = 256
            context_len = 256
        elif isinstance(model.model, comfy.model_base.Flux):
            context_dim = model.model.model_config.unet_config.get("context_in_dim", None)
            context_len_min = 256
            context_len = 256
            y_dim = model.model.model_config.unet_config.get("vec_in_dim", None)
            extra_input = {"guidance": ()}
            dtype = torch.bfloat16
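        # Summary of the per-family overrides above:
        #
        #   family   | context_dim source            | context_len min/opt | dtype
        #   ---------|-------------------------------|---------------------|---------
        #   SD/SDXL  | unet_config.context_dim       | 77 / 77             | float16
        #   SD3      | context_embedder in_features  | 77 / 154            | float16
        #   AuraFlow | 2048 (fixed)                  | 256 / 256           | float16
        #   Flux     | unet_config.context_in_dim    | 256 / 256           | bfloat16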

        if context_dim is not None:
            input_names = ["x", "timesteps", "context"]
            output_names = ["h"]

            dynamic_axes = {
                "x": {0: "batch", 2: "height", 3: "width"},
                "timesteps": {0: "batch"},
                "context": {0: "batch", 1: "num_embeds"},
            }
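            # dynamic_axes tells the ONNX exporter which dimensions may vary at
            # runtime. "x" is exported as (batch, channels, height, width) with
            # batch/height/width symbolic, so a single engine can later serve
            # e.g. both 1x4x64x64 and 2x4x96x96 latents, within the
            # optimization profile set further below.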

            transformer_options = model.model_options["transformer_options"].copy()
            if model.model.model_config.unet_config.get(
                "use_temporal_resblock", False
            ):  # SVD
                batch_size_min = num_video_frames * batch_size_min
                batch_size_opt = num_video_frames * batch_size_opt
                batch_size_max = num_video_frames * batch_size_max

                class UNET(torch.nn.Module):
                    def forward(self, x, timesteps, context, y):
                        return self.unet(
                            x,
                            timesteps,
                            context,
                            y,
                            num_video_frames=self.num_video_frames,
                            transformer_options=self.transformer_options,
                        )

                svd_unet = UNET()
                svd_unet.num_video_frames = num_video_frames
                svd_unet.unet = unet
                svd_unet.transformer_options = transformer_options
                unet = svd_unet
                context_len_min = context_len = 1
            else:

                class UNET(torch.nn.Module):
                    def forward(self, x, timesteps, context, *args):
                        # Map any remaining positional inputs (e.g. "y",
                        # "guidance") back to keyword arguments by name.
                        extras = input_names[3:]
                        extra_args = {}
                        for i in range(len(extras)):
                            extra_args[extras[i]] = args[i]
                        return self.unet(
                            x,
                            timesteps,
                            context,
                            transformer_options=self.transformer_options,
                            **extra_args,
                        )

                _unet = UNET()
                _unet.unet = unet
                _unet.transformer_options = transformer_options
                unet = _unet

            input_channels = model.model.model_config.unet_config.get("in_channels", 4)

            inputs_shapes_min = (
                (batch_size_min, input_channels, height_min // 8, width_min // 8),
                (batch_size_min,),
                (batch_size_min, context_len_min * context_min, context_dim),
            )
            inputs_shapes_opt = (
                (batch_size_opt, input_channels, height_opt // 8, width_opt // 8),
                (batch_size_opt,),
                (batch_size_opt, context_len * context_opt, context_dim),
            )
            inputs_shapes_max = (
                (batch_size_max, input_channels, height_max // 8, width_max // 8),
                (batch_size_max,),
                (batch_size_max, context_len * context_max, context_dim),
            )
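            # Pixel sizes are divided by 8 because the UNet runs on latents.
            # For example, an SD1.5 conversion with batch 1, 512x512 and
            # context multiplier 1 yields the "opt" shapes:
            #   x:         (1, 4, 64, 64)
            #   timesteps: (1,)
            #   context:   (1, 77, 768)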

            if y_dim > 0:
                input_names.append("y")
                dynamic_axes["y"] = {0: "batch"}
                inputs_shapes_min += ((batch_size_min, y_dim),)
                inputs_shapes_opt += ((batch_size_opt, y_dim),)
                inputs_shapes_max += ((batch_size_max, y_dim),)

            for k in extra_input:
                input_names.append(k)
                dynamic_axes[k] = {0: "batch"}
                inputs_shapes_min += ((batch_size_min,) + extra_input[k],)
                inputs_shapes_opt += ((batch_size_opt,) + extra_input[k],)
                inputs_shapes_max += ((batch_size_max,) + extra_input[k],)

            inputs = ()
            for shape in inputs_shapes_opt:
                inputs += (
                    torch.zeros(
                        shape,
                        device=comfy.model_management.get_torch_device(),
                        dtype=dtype,
                    ),
                )

        else:
            print("ERROR: model not supported.")
            return ()

        os.makedirs(os.path.dirname(output_onnx), exist_ok=True)
        torch.onnx.export(
            unet,
            inputs,
            output_onnx,
            verbose=False,
            input_names=input_names,
            output_names=output_names,
            opset_version=17,
            dynamo=False,  # force the legacy ONNX exporter, not torch.export/dynamic_shapes
        )
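        # Optional sanity check before the (much slower) TensorRT build; a
        # sketch assuming the `onnx` package is installed (this file does not
        # import it):
        #
        #   import onnx
        #   onnx.checker.check_model(onnx.load(output_onnx))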

        comfy.model_management.unload_all_models()
        comfy.model_management.soft_empty_cache()

        # TRT conversion starts here
        logger = trt.Logger(trt.Logger.INFO)
        builder = trt.Builder(logger)

        network = builder.create_network(
            1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
        )
        parser = trt.OnnxParser(network, logger)
        success = parser.parse_from_file(output_onnx)
        for idx in range(parser.num_errors):
            print(parser.get_error(idx))

        if not success:
            print("ONNX load ERROR")
            return ()

        config = builder.create_builder_config()
        profile = builder.create_optimization_profile()
        self._setup_timing_cache(config)
        config.progress_monitor = TQDMProgressMonitor()

        prefix_encode = ""
        for k in range(len(input_names)):
            min_shape = inputs_shapes_min[k]
            opt_shape = inputs_shapes_opt[k]
            max_shape = inputs_shapes_max[k]
            profile.set_shape(input_names[k], min_shape, opt_shape, max_shape)

            # Encode shapes to filename
            encode = lambda a: ".".join(map(str, a))
            prefix_encode += "{}#{}#{}#{};".format(
                input_names[k], encode(min_shape), encode(opt_shape), encode(max_shape)
            )
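        # prefix_encode accumulates one "name#min#opt#max;" record per input,
        # e.g. "x#1.4.64.64#1.4.64.64#1.4.96.96;timesteps#1#1#1;...". Note that
        # it is not referenced again below, so it does not currently affect the
        # saved filename.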

        if dtype == torch.float16:
            config.set_flag(trt.BuilderFlag.FP16)
        if dtype == torch.bfloat16:
            config.set_flag(trt.BuilderFlag.BF16)

        config.add_optimization_profile(profile)

        if is_static:
            filename_prefix = "{}_${}".format(
                filename_prefix,
                "-".join(
                    (
                        "stat",
                        "b",
                        str(batch_size_opt),
                        "h",
                        str(height_opt),
                        "w",
                        str(width_opt),
                    )
                ),
            )
        else:
            filename_prefix = "{}_${}".format(
                filename_prefix,
                "-".join(
                    (
                        "dyn",
                        "b",
                        str(batch_size_min),
                        str(batch_size_max),
                        str(batch_size_opt),
                        "h",
                        str(height_min),
                        str(height_max),
                        str(height_opt),
                        "w",
                        str(width_min),
                        str(width_max),
                        str(width_opt),
                    )
                ),
            )
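        # The suffix encodes the profile. For example, a dynamic build with
        # batch 1..4 (opt 1) at 512..768 (opt 512) square resolution is saved as:
        #   output/tensorrt/ComfyUI_DYN_$dyn-b-1-4-1-h-512-768-512-w-512-768-512_00001_.engine
        # (the _00001_ counter comes from get_save_image_path below).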

        serialized_engine = builder.build_serialized_network(network, config)
        if serialized_engine is None:
            # build_serialized_network returns None when the build fails.
            print("ERROR: failed to build TensorRT engine.")
            return ()

        full_output_folder, filename, counter, subfolder, filename_prefix = (
            folder_paths.get_save_image_path(filename_prefix, self.output_dir)
        )
        output_trt_engine = os.path.join(
            full_output_folder, f"{filename}_{counter:05}_.engine"
        )

        with open(output_trt_engine, "wb") as f:
            f.write(serialized_engine)

        self._save_timing_cache(config)

        return ()


class DYNAMIC_TRT_MODEL_CONVERSION(TRT_MODEL_CONVERSION_BASE):
    def __init__(self):
        super().__init__()

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "filename_prefix": ("STRING", {"default": "tensorrt/ComfyUI_DYN"}),
                "batch_size_min": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "batch_size_opt": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "batch_size_max": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "height_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "height_max": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_max": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "context_min": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "context_opt": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "context_max": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "num_video_frames": ("INT", {"default": 14, "min": 0, "max": 1000, "step": 1}),
            },
        }

    def convert(
        self,
        model,
        filename_prefix,
        batch_size_min,
        batch_size_opt,
        batch_size_max,
        height_min,
        height_opt,
        height_max,
        width_min,
        width_opt,
        width_max,
        context_min,
        context_opt,
        context_max,
        num_video_frames,
    ):
        return super()._convert(
            model,
            filename_prefix,
            batch_size_min,
            batch_size_opt,
            batch_size_max,
            height_min,
            height_opt,
            height_max,
            width_min,
            width_opt,
            width_max,
            context_min,
            context_opt,
            context_max,
            num_video_frames,
            is_static=False,
        )


class STATIC_TRT_MODEL_CONVERSION(TRT_MODEL_CONVERSION_BASE):
    def __init__(self):
        super().__init__()

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "filename_prefix": ("STRING", {"default": "tensorrt/ComfyUI_STAT"}),
                "batch_size_opt": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "context_opt": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "num_video_frames": ("INT", {"default": 14, "min": 0, "max": 1000, "step": 1}),
            },
        }

    def convert(
        self,
        model,
        filename_prefix,
        batch_size_opt,
        height_opt,
        width_opt,
        context_opt,
        num_video_frames,
    ):
        # A static engine is a dynamic one whose min/opt/max all coincide.
        return super()._convert(
            model,
            filename_prefix,
            batch_size_opt,
            batch_size_opt,
            batch_size_opt,
            height_opt,
            height_opt,
            height_opt,
            width_opt,
            width_opt,
            width_opt,
            context_opt,
            context_opt,
            context_opt,
            num_video_frames,
            is_static=True,
        )


NODE_CLASS_MAPPINGS = {
    "DYNAMIC_TRT_MODEL_CONVERSION": DYNAMIC_TRT_MODEL_CONVERSION,
    "STATIC_TRT_MODEL_CONVERSION": STATIC_TRT_MODEL_CONVERSION,
}
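
# ComfyUI also reads an optional NODE_DISPLAY_NAME_MAPPINGS dict for
# human-readable menu labels; the label text below is an assumed suggestion.
NODE_DISPLAY_NAME_MAPPINGS = {
    "DYNAMIC_TRT_MODEL_CONVERSION": "Dynamic TensorRT Conversion",
    "STATIC_TRT_MODEL_CONVERSION": "Static TensorRT Conversion",
}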