{
  "cache_layout_version": 1,
  "created_at": "2026-01-23T07:16:30Z",
  "model_path": "/root/.cache/huggingface/hub/models--black-forest-labs--FLUX.2-klein-9B/snapshots/cd1bba5810fe2aba6666d9cf7352e25436426039",
  "compile_command": [
    "/usr/bin/python",
    "/app/tensorrt_llm/visual_gen/examples/flux2_klein_9b.py",
    "--model_path",
    "/root/.cache/huggingface/hub/models--black-forest-labs--FLUX.2-klein-9B/snapshots/cd1bba5810fe2aba6666d9cf7352e25436426039",
    "--height",
    "512",
    "--width",
    "1024",
    "--num_inference_steps",
    "4",
    "--num_images",
    "6",
    "--linear_type",
    "te-fp8-per-tensor",
    "--fallback_linear_type",
    "default",
    "--torch_compile_mode",
    "default",
    "--offload_text_encoder"
  ],
  "height": 512,
  "width": 1024,
  "num_inference_steps": 4,
  "num_images": 6,
  "linear_type": "te-fp8-per-tensor",
  "fallback_linear_type": "default",
  "torch_compile_mode": "default",
  "offload_text_encoder": true,
  "offload_vae": false,
  "disable_cuda_graph": false,
  "disable_teacache": false,
  "torch_version": "2.10.0a0+b4e4ee81d3.nv25.12",
  "cuda_version": "13.1",
  "device_name": "NVIDIA GeForce RTX 4090",
  "device_capability": [
    8,
    9
  ]
}