File size: 2,676 Bytes
dabdcee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import os
import csv
import json
import datasets

# Experiment name -> folder containing that run's artifacts. Folder names
# look like HH-MM-SS timestamps — presumably the run's start time; the code
# only ever uses them as relative directory paths.
EXPERIMENTS = {
    "test1": "05-19-35",
    "test2": "06-38-03",
    "test3": "15-12-05",
    "test4": "10-49-18",
    "test5": "11-04-24",
}

# Monitoring artifact kinds. Each maps to "<filetype>.csv" inside an
# experiment folder, except "sysinfo", which is stored as "sysinfo.json"
# (see MonitoringDataset._generate_examples).
FILETYPES = [
    "output",
    "cpu_monitor",
    "cpustat_monitor",
    "irq_monitor",
    "softirq_monitor",
    "meminfo_monitor",
    "sysinfo",
]


class MonitoringConfig(datasets.BuilderConfig):
    """BuilderConfig that selects a single monitoring file type.

    The builder creates one of these per FILETYPES entry; the stored
    `filetype` drives both schema detection and example generation.
    """

    def __init__(self, filetype, **kwargs):
        """Create a config pinned to schema version 1.0.0.

        Args:
            filetype: which monitoring artifact this config loads
                (e.g. "cpu_monitor", "sysinfo").
            **kwargs: forwarded to datasets.BuilderConfig (name,
                description, ...).
        """
        pinned_version = datasets.Version("1.0.0")
        super().__init__(version=pinned_version, **kwargs)
        self.filetype = filetype


class MonitoringDataset(datasets.GeneratorBasedBuilder):
    """Loads per-experiment monitoring artifacts (CSV logs and JSON sysinfo).

    One config per FILETYPES entry; each experiment in EXPERIMENTS becomes a
    split. Experiments missing the requested file are skipped gracefully.
    All CSV columns are exposed as string-valued features.
    """

    BUILDER_CONFIGS = [
        MonitoringConfig(
            name=filetype,
            filetype=filetype,
            description=f"{filetype} across all experiments"
        )
        for filetype in FILETYPES
    ]

    def _info(self):
        """Return DatasetInfo for the configured filetype.

        For CSV filetypes the schema is inferred from the header of the first
        experiment that actually contains a non-empty file; if no experiment
        has one, an empty schema is returned.
        """
        filetype = self.config.filetype

        # sysinfo is raw JSON: expose the whole document as one string column.
        if filetype == "sysinfo":
            return datasets.DatasetInfo(
                features=datasets.Features({"json": datasets.Value("string")})
            )

        # For optional CSVs, detect the header from the first sample that exists.
        for folder in EXPERIMENTS.values():
            path = os.path.join(folder, f"{filetype}.csv")
            if not os.path.exists(path):
                continue
            # newline="" per the csv module docs; explicit utf-8 keeps the
            # read platform-independent.
            with open(path, "r", encoding="utf-8", newline="") as f:
                header = next(csv.reader(f), None)
            # An existing-but-empty file has no header row; keep looking in
            # the other experiments instead of raising StopIteration (the
            # original `next(reader)` crashed here).
            if header:
                return datasets.DatasetInfo(
                    features=datasets.Features(
                        {col: datasets.Value("string") for col in header}
                    )
                )

        # No experiment contains this filetype → return empty schema.
        return datasets.DatasetInfo(
            features=datasets.Features({})
        )

    def _split_generators(self, dl_manager):
        """Create one split per experiment.

        dl_manager is unused: all files are read from local experiment
        folders, nothing is downloaded.
        """
        return [
            datasets.SplitGenerator(
                name=exp_name,
                gen_kwargs={"folder": folder, "filetype": self.config.filetype}
            )
            for exp_name, folder in EXPERIMENTS.items()
        ]

    def _generate_examples(self, folder, filetype):
        """Yield (key, example) pairs for one experiment folder.

        "sysinfo" yields a single example whose "json" field is the whole
        document re-serialized to a string; CSV filetypes yield one example
        per row keyed by row index. A missing file yields nothing, so the
        experiment is skipped gracefully.
        """
        # JSON sysinfo: one example holding the full document.
        if filetype == "sysinfo":
            json_path = os.path.join(folder, "sysinfo.json")
            if not os.path.exists(json_path):
                return
            with open(json_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            yield 0, {"json": json.dumps(data)}
            return

        path = os.path.join(folder, f"{filetype}.csv")
        if not os.path.exists(path):
            return  # gracefully skip this experiment

        # newline="" per the csv module contract for files handed to a reader.
        with open(path, "r", encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                yield idx, row