{
    "name": "cuda_pytorch_bert",
    "backend": {
        "name": "pytorch",
        "version": "2.8.0",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "model": "google-bert/bert-base-uncased",
        "processor": "google-bert/bert-base-uncased",
        "task": "fill-mask",
        "library": "transformers",
        "model_type": "bert",
        "device": "cuda",
        "device_ids": "0",
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "tp_plan": null,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "allow_tf32": false,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 10,
        "duration": 10,
        "warmup_runs": 10,
        "input_shapes": {
            "batch_size": 1,
            "sequence_length": 128
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": false,
        "forward_kwargs": {},
        "generate_kwargs": {},
        "call_kwargs": {}
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": true,
        "device_isolation_action": "warn",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7R32",
        "cpu_count": 16,
        "cpu_ram_mb": 66697.248768,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-5.10.242-239.961.amzn2.x86_64-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.18",
        "gpu": [
            "NVIDIA A10G"
        ],
        "gpu_count": 1,
        "gpu_vram_mb": 24146608128,
        "optimum_benchmark_version": "0.7.0.dev0",
        "optimum_benchmark_commit": "b263af468a9e9a9a3d1449039849727352955646",
        "transformers_version": "4.56.2",
        "transformers_commit": "b263af468a9e9a9a3d1449039849727352955646",
        "accelerate_version": "1.10.1",
        "accelerate_commit": "b263af468a9e9a9a3d1449039849727352955646",
        "diffusers_version": "0.35.1",
        "diffusers_commit": "b263af468a9e9a9a3d1449039849727352955646",
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.20",
        "timm_commit": "b263af468a9e9a9a3d1449039849727352955646",
        "peft_version": "0.17.1",
        "peft_commit": "b263af468a9e9a9a3d1449039849727352955646"
    },
    "print_report": true,
    "log_report": true
}