Upload of 5 reports of LLM code generation and optimization

#35
by Saauan - opened
data/2025-04-09T15:57:00_a7e29da1-f798-442a-a9b4-0202a6ab34ae.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "a7e29da1-f798-442a-a9b4-0202a6ab34ae",
6
+ "reportStatus": "draft",
7
+ "reportDatetime": "2025-08-19T15:12:04.349137",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 3503,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3"
23
+ }
24
+ ],
25
+ "dataset": [
26
+ {
27
+ "dataUsage": "input",
28
+ "dataType": "token",
29
+ "dataQuantity": 909689,
30
+ "source": "public",
31
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
32
+ "owner": "evalplus"
33
+ },
34
+ {
35
+ "dataUsage": "output",
36
+ "dataType": "token",
37
+ "dataQuantity": 688216,
38
+ "source": "public",
39
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
40
+ "owner": "evalplus"
41
+ }
42
+ ],
43
+ "taskDescription": "Code optimization - Continuous batching"
44
+ },
45
+ "measures": [
46
+ {
47
+ "measurementMethod": "perf",
48
+ "version": "5.10.237",
49
+ "cpuTrackingMode": "rapl",
50
+ "powerCalibrationMeasurement": 0.0005168527777777778,
51
+ "durationCalibrationMeasurement": 30.1,
52
+ "powerConsumption": 0.016511080555555554,
53
+ "measurementDuration": 613.8,
54
+ "measurementDateTime": "2025-04-09T15:57:00"
55
+ },
56
+ {
57
+ "measurementMethod": "nvidia-smi",
58
+ "version": "v535.183.06",
59
+ "gpuTrackingMode": "nvml",
60
+ "powerCalibrationMeasurement": 0.002210708333333333,
61
+ "durationCalibrationMeasurement": 30.1,
62
+ "powerConsumption": 0.1554892972222222,
63
+ "measurementDuration": 613.8,
64
+ "measurementDateTime": "2025-04-09T15:57:00"
65
+ }
66
+ ],
67
+ "system": {
68
+ "os": "Linux",
69
+ "distribution": "Debian",
70
+ "distributionVersion": "11"
71
+ },
72
+ "software": {
73
+ "language": "python",
74
+ "version": "3.12.5"
75
+ },
76
+ "infrastructure": {
77
+ "infraType": "publicCloud",
78
+ "cloudProvider": "grid5000",
79
+ "cloudInstance": "chuc",
80
+ "components": [
81
+ {
82
+ "componentName": "Nvidia A100-SXM4-40GB",
83
+ "componentType": "gpu",
84
+ "nbComponent": 4,
85
+ "memorySize": 40,
86
+ "manufacturer": "Nvidia",
87
+ "share": 1
88
+ },
89
+ {
90
+ "componentName": "AMD EPYC 7513 (Zen 3)",
91
+ "componentType": "cpu",
92
+ "nbComponent": 1,
93
+ "manufacturer": "AMD",
94
+ "share": 1
95
+ },
96
+ {
97
+ "componentName": "ram",
98
+ "componentType": "ram",
99
+ "nbComponent": 1,
100
+ "memorySize": 512
101
+ }
102
+ ]
103
+ },
104
+ "environment": {
105
+ "country": "france",
106
+ "location": "lille"
107
+ },
108
+ "quality": "high"
109
+ }
data/2025-04-09T15:57:00_e27a588f-2bd5-45f9-9845-8d0410a704bf.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "e27a588f-2bd5-45f9-9845-8d0410a704bf",
6
+ "reportStatus": "draft",
7
+ "reportDatetime": "2025-08-19T15:12:04.348568",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 3856,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3"
23
+ }
24
+ ],
25
+ "dataset": [
26
+ {
27
+ "dataUsage": "input",
28
+ "dataType": "token",
29
+ "dataQuantity": 1008673,
30
+ "source": "public",
31
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
32
+ "owner": "evalplus"
33
+ },
34
+ {
35
+ "dataUsage": "output",
36
+ "dataType": "token",
37
+ "dataQuantity": 800860,
38
+ "source": "public",
39
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
40
+ "owner": "evalplus"
41
+ }
42
+ ],
43
+ "taskDescription": "Code optimization - Continuous batching"
44
+ },
45
+ "measures": [
46
+ {
47
+ "measurementMethod": "perf",
48
+ "version": "5.10.237",
49
+ "cpuTrackingMode": "rapl",
50
+ "powerCalibrationMeasurement": 0.0005146861111111111,
51
+ "durationCalibrationMeasurement": 30.0,
52
+ "powerConsumption": 0.017565705555555557,
53
+ "measurementDuration": 650.6,
54
+ "measurementDateTime": "2025-04-09T15:57:00"
55
+ },
56
+ {
57
+ "measurementMethod": "nvidia-smi",
58
+ "version": "v535.183.06",
59
+ "gpuTrackingMode": "nvml",
60
+ "powerCalibrationMeasurement": 0.0022279555555555553,
61
+ "durationCalibrationMeasurement": 30.0,
62
+ "powerConsumption": 0.16416904166666668,
63
+ "measurementDuration": 650.6,
64
+ "measurementDateTime": "2025-04-09T15:57:00"
65
+ }
66
+ ],
67
+ "system": {
68
+ "os": "Linux",
69
+ "distribution": "Debian",
70
+ "distributionVersion": "11"
71
+ },
72
+ "software": {
73
+ "language": "python",
74
+ "version": "3.12.5"
75
+ },
76
+ "infrastructure": {
77
+ "infraType": "publicCloud",
78
+ "cloudProvider": "grid5000",
79
+ "cloudInstance": "chuc",
80
+ "components": [
81
+ {
82
+ "componentName": "Nvidia A100-SXM4-40GB",
83
+ "componentType": "gpu",
84
+ "nbComponent": 4,
85
+ "memorySize": 40,
86
+ "manufacturer": "Nvidia",
87
+ "share": 1
88
+ },
89
+ {
90
+ "componentName": "AMD EPYC 7513 (Zen 3)",
91
+ "componentType": "cpu",
92
+ "nbComponent": 1,
93
+ "manufacturer": "AMD",
94
+ "share": 1
95
+ },
96
+ {
97
+ "componentName": "ram",
98
+ "componentType": "ram",
99
+ "nbComponent": 1,
100
+ "memorySize": 512
101
+ }
102
+ ]
103
+ },
104
+ "environment": {
105
+ "country": "france",
106
+ "location": "lille"
107
+ },
108
+ "quality": "high"
109
+ }
data/2025-04-09T15:57:00_e3b351e6-ff88-4e98-a702-6eb240de2868.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "e3b351e6-ff88-4e98-a702-6eb240de2868",
6
+ "reportStatus": "draft",
7
+ "reportDatetime": "2025-08-19T15:12:04.348808",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 3644,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3"
23
+ }
24
+ ],
25
+ "dataset": [
26
+ {
27
+ "dataUsage": "input",
28
+ "dataType": "token",
29
+ "dataQuantity": 946440,
30
+ "source": "public",
31
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
32
+ "owner": "evalplus"
33
+ },
34
+ {
35
+ "dataUsage": "output",
36
+ "dataType": "token",
37
+ "dataQuantity": 745290,
38
+ "source": "public",
39
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
40
+ "owner": "evalplus"
41
+ }
42
+ ],
43
+ "taskDescription": "Code optimization - Continuous batching"
44
+ },
45
+ "measures": [
46
+ {
47
+ "measurementMethod": "perf",
48
+ "version": "5.10.237",
49
+ "cpuTrackingMode": "rapl",
50
+ "powerCalibrationMeasurement": 0.000526675,
51
+ "durationCalibrationMeasurement": 30.1,
52
+ "powerConsumption": 0.01679630277777778,
53
+ "measurementDuration": 622.2,
54
+ "measurementDateTime": "2025-04-09T15:57:00"
55
+ },
56
+ {
57
+ "measurementMethod": "nvidia-smi",
58
+ "version": "v535.183.06",
59
+ "gpuTrackingMode": "nvml",
60
+ "powerCalibrationMeasurement": 0.0022218027777777777,
61
+ "durationCalibrationMeasurement": 30.1,
62
+ "powerConsumption": 0.15821084166666669,
63
+ "measurementDuration": 622.2,
64
+ "measurementDateTime": "2025-04-09T15:57:00"
65
+ }
66
+ ],
67
+ "system": {
68
+ "os": "Linux",
69
+ "distribution": "Debian",
70
+ "distributionVersion": "11"
71
+ },
72
+ "software": {
73
+ "language": "python",
74
+ "version": "3.12.5"
75
+ },
76
+ "infrastructure": {
77
+ "infraType": "publicCloud",
78
+ "cloudProvider": "grid5000",
79
+ "cloudInstance": "chuc",
80
+ "components": [
81
+ {
82
+ "componentName": "Nvidia A100-SXM4-40GB",
83
+ "componentType": "gpu",
84
+ "nbComponent": 4,
85
+ "memorySize": 40,
86
+ "manufacturer": "Nvidia",
87
+ "share": 1
88
+ },
89
+ {
90
+ "componentName": "AMD EPYC 7513 (Zen 3)",
91
+ "componentType": "cpu",
92
+ "nbComponent": 1,
93
+ "manufacturer": "AMD",
94
+ "share": 1
95
+ },
96
+ {
97
+ "componentName": "ram",
98
+ "componentType": "ram",
99
+ "nbComponent": 1,
100
+ "memorySize": 512
101
+ }
102
+ ]
103
+ },
104
+ "environment": {
105
+ "country": "france",
106
+ "location": "lille"
107
+ },
108
+ "quality": "high"
109
+ }
data/2025-04-09T15:57:00_f5d63622-f053-4674-a416-270eb67af771.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "f5d63622-f053-4674-a416-270eb67af771",
6
+ "reportStatus": "draft",
7
+ "reportDatetime": "2025-08-19T15:12:04.348019",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4720,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3"
23
+ }
24
+ ],
25
+ "dataset": [
26
+ {
27
+ "dataUsage": "input",
28
+ "dataType": "token",
29
+ "dataQuantity": 583440,
30
+ "source": "public",
31
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
32
+ "owner": "evalplus"
33
+ },
34
+ {
35
+ "dataUsage": "output",
36
+ "dataType": "token",
37
+ "dataQuantity": 720612,
38
+ "source": "public",
39
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
40
+ "owner": "evalplus"
41
+ }
42
+ ],
43
+ "taskDescription": "Code generation from EvalPerf prompts - Continuous batching"
44
+ },
45
+ "measures": [
46
+ {
47
+ "measurementMethod": "perf",
48
+ "version": "5.10.237",
49
+ "cpuTrackingMode": "rapl",
50
+ "powerCalibrationMeasurement": 0.0005174499999999999,
51
+ "durationCalibrationMeasurement": 30.0,
52
+ "powerConsumption": 0.008403875,
53
+ "measurementDuration": 337.9,
54
+ "measurementDateTime": "2025-04-09T15:57:00"
55
+ },
56
+ {
57
+ "measurementMethod": "nvidia-smi",
58
+ "version": "v535.183.06",
59
+ "gpuTrackingMode": "nvml",
60
+ "powerCalibrationMeasurement": 0.0022278750000000003,
61
+ "durationCalibrationMeasurement": 30.0,
62
+ "powerConsumption": 0.08680710833333334,
63
+ "measurementDuration": 337.9,
64
+ "measurementDateTime": "2025-04-09T15:57:00"
65
+ }
66
+ ],
67
+ "system": {
68
+ "os": "Linux",
69
+ "distribution": "Debian",
70
+ "distributionVersion": "11"
71
+ },
72
+ "software": {
73
+ "language": "python",
74
+ "version": "3.12.5"
75
+ },
76
+ "infrastructure": {
77
+ "infraType": "publicCloud",
78
+ "cloudProvider": "grid5000",
79
+ "cloudInstance": "chuc",
80
+ "components": [
81
+ {
82
+ "componentName": "Nvidia A100-SXM4-40GB",
83
+ "componentType": "gpu",
84
+ "nbComponent": 4,
85
+ "memorySize": 40,
86
+ "manufacturer": "Nvidia",
87
+ "share": 1
88
+ },
89
+ {
90
+ "componentName": "AMD EPYC 7513 (Zen 3)",
91
+ "componentType": "cpu",
92
+ "nbComponent": 1,
93
+ "manufacturer": "AMD",
94
+ "share": 1
95
+ },
96
+ {
97
+ "componentName": "ram",
98
+ "componentType": "ram",
99
+ "nbComponent": 1,
100
+ "memorySize": 512
101
+ }
102
+ ]
103
+ },
104
+ "environment": {
105
+ "country": "france",
106
+ "location": "lille"
107
+ },
108
+ "quality": "high"
109
+ }
data/2025-04-09T15:57:00_f94c5a69-c199-4c75-b6cc-ff78f7fa1e6e.json ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "f94c5a69-c199-4c75-b6cc-ff78f7fa1e6e",
6
+ "reportStatus": "draft",
7
+ "reportDatetime": "2025-08-19T15:12:04.348317",
8
+ "publisher": {
9
+ "name": "Inria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "code generation",
16
+ "nbRequest": 4191,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "Qwen/Qwen2.5-Coder-14B-Instruct",
21
+ "framework": "vllm",
22
+ "frameworkVersion": "0.7.3"
23
+ }
24
+ ],
25
+ "dataset": [
26
+ {
27
+ "dataUsage": "input",
28
+ "dataType": "token",
29
+ "dataQuantity": 1071680,
30
+ "source": "public",
31
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
32
+ "owner": "evalplus"
33
+ },
34
+ {
35
+ "dataUsage": "output",
36
+ "dataType": "token",
37
+ "dataQuantity": 944429,
38
+ "source": "public",
39
+ "sourceUri": "https://huggingface.co/datasets/evalplus/evalperf",
40
+ "owner": "evalplus"
41
+ }
42
+ ],
43
+ "taskDescription": "Code optimization - Continuous batching"
44
+ },
45
+ "measures": [
46
+ {
47
+ "measurementMethod": "perf",
48
+ "version": "5.10.237",
49
+ "cpuTrackingMode": "rapl",
50
+ "powerCalibrationMeasurement": 0.0005258444444444445,
51
+ "durationCalibrationMeasurement": 30.0,
52
+ "powerConsumption": 0.018224211111111114,
53
+ "measurementDuration": 671.3,
54
+ "measurementDateTime": "2025-04-09T15:57:00"
55
+ },
56
+ {
57
+ "measurementMethod": "nvidia-smi",
58
+ "version": "v535.183.06",
59
+ "gpuTrackingMode": "nvml",
60
+ "powerCalibrationMeasurement": 0.0022474305555555555,
61
+ "durationCalibrationMeasurement": 30.0,
62
+ "powerConsumption": 0.1729249611111111,
63
+ "measurementDuration": 671.3,
64
+ "measurementDateTime": "2025-04-09T15:57:00"
65
+ }
66
+ ],
67
+ "system": {
68
+ "os": "Linux",
69
+ "distribution": "Debian",
70
+ "distributionVersion": "11"
71
+ },
72
+ "software": {
73
+ "language": "python",
74
+ "version": "3.12.5"
75
+ },
76
+ "infrastructure": {
77
+ "infraType": "publicCloud",
78
+ "cloudProvider": "grid5000",
79
+ "cloudInstance": "chuc",
80
+ "components": [
81
+ {
82
+ "componentName": "Nvidia A100-SXM4-40GB",
83
+ "componentType": "gpu",
84
+ "nbComponent": 4,
85
+ "memorySize": 40,
86
+ "manufacturer": "Nvidia",
87
+ "share": 1
88
+ },
89
+ {
90
+ "componentName": "AMD EPYC 7513 (Zen 3)",
91
+ "componentType": "cpu",
92
+ "nbComponent": 1,
93
+ "manufacturer": "AMD",
94
+ "share": 1
95
+ },
96
+ {
97
+ "componentName": "ram",
98
+ "componentType": "ram",
99
+ "nbComponent": 1,
100
+ "memorySize": 512
101
+ }
102
+ ]
103
+ },
104
+ "environment": {
105
+ "country": "france",
106
+ "location": "lille"
107
+ },
108
+ "quality": "high"
109
+ }