saliacoel committed
Commit d7c9f29 · verified · 1 parent: abe7d46

Upload tensorrt_convert.py

Files changed (1):
  1. tensorrt_convert.py +651 -0
tensorrt_convert.py (new file):
import torch
import os
import time

import comfy.model_management
import comfy.model_base
import tensorrt as trt
import folder_paths
from tqdm import tqdm

# TODO:
# Make this more generic: less model-specific code.

# Add the output directory to the tensorrt search path so saved engines are
# discoverable, and register ".engine" as a known extension.
if "tensorrt" in folder_paths.folder_names_and_paths:
    folder_paths.folder_names_and_paths["tensorrt"][0].append(
        os.path.join(folder_paths.get_output_directory(), "tensorrt")
    )
    folder_paths.folder_names_and_paths["tensorrt"][1].add(".engine")
else:
    folder_paths.folder_names_and_paths["tensorrt"] = (
        [os.path.join(folder_paths.get_output_directory(), "tensorrt")],
        {".engine"},
    )
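
# With the registration above, engines written to the output folder can later
# be resolved through ComfyUI's folder_paths helpers. A minimal sketch (the
# engine filename shown is hypothetical):
#
#   names = folder_paths.get_filename_list("tensorrt")
#   path = folder_paths.get_full_path("tensorrt", "ComfyUI_DYN_00001_.engine")
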
class TQDMProgressMonitor(trt.IProgressMonitor):
    def __init__(self):
        trt.IProgressMonitor.__init__(self)
        self._active_phases = {}
        self._step_result = True
        self.max_indent = 5

    def phase_start(self, phase_name, parent_phase, num_steps):
        leave = False
        try:
            if parent_phase is not None:
                nbIndents = (
                    self._active_phases.get(parent_phase, {}).get(
                        "nbIndents", self.max_indent
                    )
                    + 1
                )
                if nbIndents >= self.max_indent:
                    return
            else:
                nbIndents = 0
                leave = True
            self._active_phases[phase_name] = {
                "tq": tqdm(
                    total=num_steps, desc=phase_name, leave=leave, position=nbIndents
                ),
                "nbIndents": nbIndents,
                "parent_phase": parent_phase,
            }
        except KeyboardInterrupt:
            # The phase_start callback cannot directly cancel the build, so
            # request the cancellation from within step_complete.
            self._step_result = False

    def phase_finish(self, phase_name):
        try:
            if phase_name in self._active_phases.keys():
                self._active_phases[phase_name]["tq"].update(
                    self._active_phases[phase_name]["tq"].total
                    - self._active_phases[phase_name]["tq"].n
                )

                parent_phase = self._active_phases[phase_name].get("parent_phase", None)
                while parent_phase is not None:
                    self._active_phases[parent_phase]["tq"].refresh()
                    parent_phase = self._active_phases[parent_phase].get(
                        "parent_phase", None
                    )
                if (
                    self._active_phases[phase_name]["parent_phase"]
                    in self._active_phases.keys()
                ):
                    self._active_phases[
                        self._active_phases[phase_name]["parent_phase"]
                    ]["tq"].refresh()
                del self._active_phases[phase_name]
        except KeyboardInterrupt:
            self._step_result = False

    def step_complete(self, phase_name, step):
        try:
            if phase_name in self._active_phases.keys():
                self._active_phases[phase_name]["tq"].update(
                    step - self._active_phases[phase_name]["tq"].n
                )
            return self._step_result
        except KeyboardInterrupt:
            # There is no need to propagate this exception to TensorRT;
            # returning False simply cancels the build.
            return False

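# Note: trt.IProgressMonitor and the IBuilderConfig.progress_monitor attribute
# used below are only available in recent TensorRT releases; on older versions
# this module would fail at the class definition above.
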
class TRT_MODEL_CONVERSION_BASE:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.temp_dir = folder_paths.get_temp_directory()
        self.timing_cache_path = os.path.normpath(
            os.path.join(
                os.path.dirname(os.path.realpath(__file__)), "timing_cache.trt"
            )
        )

    RETURN_TYPES = ()
    FUNCTION = "convert"
    OUTPUT_NODE = True
    CATEGORY = "TensorRT"

    @classmethod
    def INPUT_TYPES(s):
        raise NotImplementedError

    # Sets up the builder to use the timing cache file, creating it if it
    # does not already exist.
    def _setup_timing_cache(self, config: trt.IBuilderConfig):
        buffer = b""
        if os.path.exists(self.timing_cache_path):
            with open(self.timing_cache_path, mode="rb") as timing_cache_file:
                buffer = timing_cache_file.read()
            print("Read {} bytes from timing cache.".format(len(buffer)))
        else:
            print("No timing cache found; initializing a new one.")
        timing_cache: trt.ITimingCache = config.create_timing_cache(buffer)
        config.set_timing_cache(timing_cache, ignore_mismatch=True)

    # Saves the config's timing cache to file.
    def _save_timing_cache(self, config: trt.IBuilderConfig):
        timing_cache: trt.ITimingCache = config.get_timing_cache()
        with open(self.timing_cache_path, "wb") as timing_cache_file:
            timing_cache_file.write(memoryview(timing_cache.serialize()))

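    # _convert exports the loaded diffusion model to ONNX, then builds a
    # TensorRT engine with a single optimization profile. Every input gets a
    # (min, opt, max) shape range: TensorRT tunes kernels for the opt shape
    # while accepting anything within [min, max]. For a static build the
    # three values coincide.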
    def _convert(
        self,
        model,
        filename_prefix,
        batch_size_min,
        batch_size_opt,
        batch_size_max,
        height_min,
        height_opt,
        height_max,
        width_min,
        width_opt,
        width_max,
        context_min,
        context_opt,
        context_max,
        num_video_frames,
        is_static: bool,
    ):
        output_onnx = os.path.normpath(
            os.path.join(self.temp_dir, "{}".format(time.time()), "model.onnx")
        )

        comfy.model_management.unload_all_models()
        comfy.model_management.load_models_gpu(
            [model], force_patch_weights=True, force_full_load=True
        )
        unet = model.model.diffusion_model

        context_dim = model.model.model_config.unet_config.get("context_dim", None)
        context_len = 77
        context_len_min = context_len
        y_dim = model.model.adm_channels
        extra_input = {}
        dtype = torch.float16

        if isinstance(model.model, comfy.model_base.SD3):  # SD3
            context_embedder_config = model.model.model_config.unet_config.get(
                "context_embedder_config", None
            )
            if context_embedder_config is not None:
                context_dim = context_embedder_config.get("params", {}).get(
                    "in_features", None
                )
                # NOTE: SD3 can have 77 or 154 tokens depending on which text
                # encoders are used, which is why context_len_min stays 77.
                context_len = 154
        elif isinstance(model.model, comfy.model_base.AuraFlow):
            context_dim = 2048
            context_len_min = 256
            context_len = 256
        elif isinstance(model.model, comfy.model_base.Flux):
            context_dim = model.model.model_config.unet_config.get(
                "context_in_dim", None
            )
            context_len_min = 256
            context_len = 256
            y_dim = model.model.model_config.unet_config.get("vec_in_dim", None)
            extra_input = {"guidance": ()}
            dtype = torch.bfloat16

        if context_dim is not None:
            input_names = ["x", "timesteps", "context"]
            output_names = ["h"]

            dynamic_axes = {
                "x": {0: "batch", 2: "height", 3: "width"},
                "timesteps": {0: "batch"},
                "context": {0: "batch", 1: "num_embeds"},
            }

            transformer_options = model.model_options["transformer_options"].copy()
            if model.model.model_config.unet_config.get(
                "use_temporal_resblock", False
            ):  # SVD
                batch_size_min = num_video_frames * batch_size_min
                batch_size_opt = num_video_frames * batch_size_opt
                batch_size_max = num_video_frames * batch_size_max

                class UNET(torch.nn.Module):
                    def forward(self, x, timesteps, context, y):
                        return self.unet(
                            x,
                            timesteps,
                            context,
                            y,
                            num_video_frames=self.num_video_frames,
                            transformer_options=self.transformer_options,
                        )

                svd_unet = UNET()
                svd_unet.num_video_frames = num_video_frames
                svd_unet.unet = unet
                svd_unet.transformer_options = transformer_options
                unet = svd_unet
                context_len_min = context_len = 1
            else:

                class UNET(torch.nn.Module):
                    def forward(self, x, timesteps, context, *args):
                        extras = input_names[3:]
                        extra_args = {}
                        for i in range(len(extras)):
                            extra_args[extras[i]] = args[i]
                        return self.unet(
                            x,
                            timesteps,
                            context,
                            transformer_options=self.transformer_options,
                            **extra_args,
                        )

                _unet = UNET()
                _unet.unet = unet
                _unet.transformer_options = transformer_options
                unet = _unet

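            # torch.onnx.export traces positional inputs only, so the wrapper
            # above re-maps any trailing positional args (e.g. Flux's
            # "guidance") back to the keyword arguments the model expects.
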
            input_channels = model.model.model_config.unet_config.get("in_channels", 4)

            # Latent-space inputs: pixel dimensions divided by the VAE's 8x
            # downscale factor.
            inputs_shapes_min = (
                (batch_size_min, input_channels, height_min // 8, width_min // 8),
                (batch_size_min,),
                (batch_size_min, context_len_min * context_min, context_dim),
            )
            inputs_shapes_opt = (
                (batch_size_opt, input_channels, height_opt // 8, width_opt // 8),
                (batch_size_opt,),
                (batch_size_opt, context_len * context_opt, context_dim),
            )
            inputs_shapes_max = (
                (batch_size_max, input_channels, height_max // 8, width_max // 8),
                (batch_size_max,),
                (batch_size_max, context_len * context_max, context_dim),
            )

            if y_dim > 0:
                input_names.append("y")
                dynamic_axes["y"] = {0: "batch"}
                inputs_shapes_min += ((batch_size_min, y_dim),)
                inputs_shapes_opt += ((batch_size_opt, y_dim),)
                inputs_shapes_max += ((batch_size_max, y_dim),)

            for k in extra_input:
                input_names.append(k)
                dynamic_axes[k] = {0: "batch"}
                inputs_shapes_min += ((batch_size_min,) + extra_input[k],)
                inputs_shapes_opt += ((batch_size_opt,) + extra_input[k],)
                inputs_shapes_max += ((batch_size_max,) + extra_input[k],)

            inputs = ()
            for shape in inputs_shapes_opt:
                inputs += (
                    torch.zeros(
                        shape,
                        device=comfy.model_management.get_torch_device(),
                        dtype=dtype,
                    ),
                )

        else:
            print("ERROR: model not supported.")
            return ()

        os.makedirs(os.path.dirname(output_onnx), exist_ok=True)
        torch.onnx.export(
            unet,
            inputs,
            output_onnx,
            verbose=False,
            input_names=input_names,
            output_names=output_names,
            opset_version=17,
            dynamic_axes=dynamic_axes,
            dynamo=False,  # force the legacy ONNX exporter (no torch.export/dynamic_shapes)
        )

        comfy.model_management.unload_all_models()
        comfy.model_management.soft_empty_cache()

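        # Optional sanity check on the exported graph before building; a
        # sketch assuming the `onnx` package is available (it is not a
        # dependency of this file):
        #
        #   import onnx
        #   onnx.checker.check_model(output_onnx)
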
        # TRT conversion starts here
        logger = trt.Logger(trt.Logger.INFO)
        builder = trt.Builder(logger)

        network = builder.create_network(
            1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
        )
        parser = trt.OnnxParser(network, logger)
        success = parser.parse_from_file(output_onnx)
        for idx in range(parser.num_errors):
            print(parser.get_error(idx))

        if not success:
            print("ONNX load ERROR")
            return ()

        config = builder.create_builder_config()
        profile = builder.create_optimization_profile()
        self._setup_timing_cache(config)
        config.progress_monitor = TQDMProgressMonitor()

        prefix_encode = ""
        for k in range(len(input_names)):
            min_shape = inputs_shapes_min[k]
            opt_shape = inputs_shapes_opt[k]
            max_shape = inputs_shapes_max[k]
            profile.set_shape(input_names[k], min_shape, opt_shape, max_shape)

            # Encode shapes into the filename
            encode = lambda a: ".".join(map(str, a))
            prefix_encode += "{}#{}#{}#{};".format(
                input_names[k], encode(min_shape), encode(opt_shape), encode(max_shape)
            )

        if dtype == torch.float16:
            config.set_flag(trt.BuilderFlag.FP16)
        if dtype == torch.bfloat16:
            config.set_flag(trt.BuilderFlag.BF16)

        config.add_optimization_profile(profile)

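        # For an SD1.5-style model at batch 1, 512x512, with a single 77-token
        # context (context_dim 768), prefix_encode would look roughly like:
        #   x#1.4.64.64#1.4.64.64#1.4.64.64;timesteps#1#1#1;context#1.77.768#1.77.768#1.77.768;
        # Note that prefix_encode is built but not currently appended to the
        # engine filename below.
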
        if is_static:
            filename_prefix = "{}_${}".format(
                filename_prefix,
                "-".join(
                    (
                        "stat",
                        "b",
                        str(batch_size_opt),
                        "h",
                        str(height_opt),
                        "w",
                        str(width_opt),
                    )
                ),
            )
        else:
            filename_prefix = "{}_${}".format(
                filename_prefix,
                "-".join(
                    (
                        "dyn",
                        "b",
                        str(batch_size_min),
                        str(batch_size_max),
                        str(batch_size_opt),
                        "h",
                        str(height_min),
                        str(height_max),
                        str(height_opt),
                        "w",
                        str(width_min),
                        str(width_max),
                        str(width_opt),
                    )
                ),
            )

        serialized_engine = builder.build_serialized_network(network, config)
        if serialized_engine is None:
            # build_serialized_network returns None when the build fails.
            print("ERROR: TensorRT engine build failed.")
            return ()

        full_output_folder, filename, counter, subfolder, filename_prefix = (
            folder_paths.get_save_image_path(filename_prefix, self.output_dir)
        )
        output_trt_engine = os.path.join(
            full_output_folder, f"{filename}_{counter:05}_.engine"
        )

        with open(output_trt_engine, "wb") as f:
            f.write(serialized_engine)

        self._save_timing_cache(config)

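        # A quick way to confirm the serialized engine loads back; a sketch,
        # not part of the original flow:
        #
        #   runtime = trt.Runtime(logger)
        #   with open(output_trt_engine, "rb") as f:
        #       assert runtime.deserialize_cuda_engine(f.read()) is not None
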
        return ()

class DYNAMIC_TRT_MODEL_CONVERSION(TRT_MODEL_CONVERSION_BASE):
    def __init__(self):
        super().__init__()

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "filename_prefix": ("STRING", {"default": "tensorrt/ComfyUI_DYN"}),
                "batch_size_min": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "batch_size_opt": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "batch_size_max": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "height_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "height_max": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_max": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "context_min": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "context_opt": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "context_max": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "num_video_frames": ("INT", {"default": 14, "min": 0, "max": 1000, "step": 1}),
            },
        }

    def convert(
        self,
        model,
        filename_prefix,
        batch_size_min,
        batch_size_opt,
        batch_size_max,
        height_min,
        height_opt,
        height_max,
        width_min,
        width_opt,
        width_max,
        context_min,
        context_opt,
        context_max,
        num_video_frames,
    ):
        return super()._convert(
            model,
            filename_prefix,
            batch_size_min,
            batch_size_opt,
            batch_size_max,
            height_min,
            height_opt,
            height_max,
            width_min,
            width_opt,
            width_max,
            context_min,
            context_opt,
            context_max,
            num_video_frames,
            is_static=False,
        )

class STATIC_TRT_MODEL_CONVERSION(TRT_MODEL_CONVERSION_BASE):
    def __init__(self):
        super().__init__()

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "filename_prefix": ("STRING", {"default": "tensorrt/ComfyUI_STAT"}),
                "batch_size_opt": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
                "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
                "context_opt": ("INT", {"default": 1, "min": 1, "max": 128, "step": 1}),
                "num_video_frames": ("INT", {"default": 14, "min": 0, "max": 1000, "step": 1}),
            },
        }

    def convert(
        self,
        model,
        filename_prefix,
        batch_size_opt,
        height_opt,
        width_opt,
        context_opt,
        num_video_frames,
    ):
        # A static engine uses the same value for min/opt/max on every axis.
        return super()._convert(
            model,
            filename_prefix,
            batch_size_opt,
            batch_size_opt,
            batch_size_opt,
            height_opt,
            height_opt,
            height_opt,
            width_opt,
            width_opt,
            width_opt,
            context_opt,
            context_opt,
            context_opt,
            num_video_frames,
            is_static=True,
        )

NODE_CLASS_MAPPINGS = {
    "DYNAMIC_TRT_MODEL_CONVERSION": DYNAMIC_TRT_MODEL_CONVERSION,
    "STATIC_TRT_MODEL_CONVERSION": STATIC_TRT_MODEL_CONVERSION,
}
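
# Optional: ComfyUI also reads NODE_DISPLAY_NAME_MAPPINGS for friendlier node
# titles in the UI; a sketch of what that could look like for these nodes:
#
#   NODE_DISPLAY_NAME_MAPPINGS = {
#       "DYNAMIC_TRT_MODEL_CONVERSION": "Dynamic TensorRT Conversion",
#       "STATIC_TRT_MODEL_CONVERSION": "Static TensorRT Conversion",
#   }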