alexjercan committed on
Commit
8185b3d
·
1 Parent(s): 38fb7be

feat(loader): added bugnet.py

Browse files
Files changed (2) hide show
  1. README.md +40 -0
  2. bugnet.py +117 -0
README.md CHANGED
@@ -1,3 +1,43 @@
1
  ---
2
  license: apache-2.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
1
  ---
2
  license: apache-2.0
3
+ dataset_info:
4
+ features:
5
+ - name: problem_id
6
+ dtype: string
7
+ - name: language
8
+ dtype: string
9
+ - name: original_status
10
+ dtype: string
11
+ - name: original_src
12
+ dtype: string
13
+ - name: changed_src
14
+ dtype: string
15
+ - name: change
16
+ dtype: string
17
+ - name: i1
18
+ dtype: uint32
19
+ - name: i2
20
+ dtype: uint32
21
+ - name: j1
22
+ dtype: uint32
23
+ - name: j2
24
+ dtype: uint32
25
+ - name: error
26
+ dtype: string
27
+ - name: stderr
28
+ dtype: string
29
+ - name: description
30
+ dtype: string
31
+ splits:
32
+ - name: train
33
+ num_bytes: 245629877
34
+ num_examples: 41905
35
+ - name: validation
36
+ num_bytes: 107794247
37
+ num_examples: 20037
38
+ - name: test
39
+ num_bytes: 108416842
40
+ num_examples: 20127
41
+ download_size: 295192809
42
+ dataset_size: 461840966
43
  ---
bugnet.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TODO: Add a description here."""
2
+
3
+
4
+ import json
5
+
6
+ import datasets
7
+
8
+ # TODO: Add BibTeX citation
9
+ _CITATION = """\
10
+ """
11
+
12
+ # TODO: Add description of the dataset here
13
+ _DESCRIPTION = """\
14
+ """
15
+
16
+ # TODO: Add a link to an official homepage for the dataset here
17
+ _HOMEPAGE = ""
18
+
19
+ # TODO: Add the licence for the dataset here if you can find it
20
+ _LICENSE = ""
21
+
22
+ _URL = "https://huggingface.co/datasets/alexjercan/bugnet/resolve/main/"
23
+ _URLS = {
24
+ "train": _URL + "train.jsonl",
25
+ "validation": _URL + "validation.jsonl",
26
+ "test": _URL + "test.jsonl",
27
+ "descriptions": _URL + "problem_descriptions.json",
28
+ }
29
+
30
+
31
+ class Bugnet(datasets.GeneratorBasedBuilder):
32
+ """TODO: Short description of my dataset."""
33
+
34
+ VERSION = datasets.Version("1.1.0")
35
+
36
+ def _info(self):
37
+ features = datasets.Features(
38
+ {
39
+ "problem_id": datasets.Value("string"),
40
+ "language": datasets.Value("string"),
41
+ "original_status": datasets.Value("string"),
42
+ "original_src": datasets.Value("string"),
43
+ "changed_src": datasets.Value("string"),
44
+ "change": datasets.Value("string"),
45
+ "i1": datasets.Value("uint32"),
46
+ "i2": datasets.Value("uint32"),
47
+ "j1": datasets.Value("uint32"),
48
+ "j2": datasets.Value("uint32"),
49
+ "error": datasets.Value("string"),
50
+ "stderr": datasets.Value("string"),
51
+ "description": datasets.Value("string"),
52
+ }
53
+ )
54
+
55
+ return datasets.DatasetInfo(
56
+ description=_DESCRIPTION,
57
+ features=features,
58
+ homepage=_HOMEPAGE,
59
+ license=_LICENSE,
60
+ citation=_CITATION,
61
+ )
62
+
63
+ def _split_generators(self, dl_manager):
64
+ data_dir = dl_manager.download_and_extract(_URLS)
65
+ return [
66
+ datasets.SplitGenerator(
67
+ name=datasets.Split.TRAIN,
68
+ gen_kwargs={
69
+ "filepath": data_dir["train"],
70
+ "descriptions": data_dir["descriptions"],
71
+ },
72
+ ),
73
+ datasets.SplitGenerator(
74
+ name=datasets.Split.VALIDATION,
75
+ gen_kwargs={
76
+ "filepath": data_dir["validation"],
77
+ "descriptions": data_dir["descriptions"],
78
+ },
79
+ ),
80
+ datasets.SplitGenerator(
81
+ name=datasets.Split.TEST,
82
+ gen_kwargs={
83
+ "filepath": data_dir["test"],
84
+ "descriptions": data_dir["descriptions"],
85
+ },
86
+ ),
87
+ ]
88
+
89
+ def _generate_examples(self, filepath, descriptions):
90
+ with open(descriptions, encoding="utf-8") as file:
91
+ data = json.load(file)
92
+
93
+ descriptions = {}
94
+ for item in data:
95
+ key = item["problem_id"]
96
+ value = item["description"]
97
+ descriptions[key] = value
98
+
99
+ with open(filepath, encoding="utf-8") as file:
100
+ for key, row in enumerate(file):
101
+ data = json.loads(row)
102
+
103
+ yield key, {
104
+ "problem_id": data["problem_id"],
105
+ "language": data["language"],
106
+ "original_status": data["original_status"],
107
+ "original_src": data["original_src"],
108
+ "changed_src": data["changed_src"],
109
+ "change": data["change"],
110
+ "i1": data["i1"],
111
+ "i2": data["i2"],
112
+ "j1": data["j1"],
113
+ "j2": data["j2"],
114
+ "error": data["error"],
115
+ "stderr": data["stderr"],
116
+ "description": descriptions[data["problem_id"]],
117
+ }