Der committed on
Commit
749663e
·
1 Parent(s): b35fcc0
parquet.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import glob
3
+ import os
4
+
5
def make_parquets():
    """Convert every ``chap*_dataset.jsonl`` file in the current directory to Parquet.

    Each JSONL file is reduced to the required columns and written next to
    the source as ``<name>.parquet``; all successfully converted chapters
    are then concatenated into a single ``full_chapters_dataset.parquet``.

    Progress and per-file errors are printed to stdout. Returns ``None``.
    """
    # Sort the matches: glob.glob returns files in arbitrary, platform-dependent
    # order, which would make the row order of the concatenated output
    # non-deterministic between runs.
    jsonl_files = sorted(glob.glob('chap*_dataset.jsonl'))

    if not jsonl_files:
        print("[-] Files chap*_dataset.jsonl Not found in current dir")
        return

    all_dataframes = []
    # Columns the dataset schema requires; all other columns are dropped.
    required_columns = ['context', 'speaker', 'text']

    print(f"[*] Found files: {len(jsonl_files)}")

    for file_name in jsonl_files:
        try:
            df = pd.read_json(file_name, lines=True)

            # Select the schema columns; raises KeyError if any is missing.
            df = df[required_columns]

            output_name = file_name.replace('.jsonl', '.parquet')
            df.to_parquet(output_name, index=False, engine='pyarrow')

            print(f"[+] Created: {output_name} ({len(df)} strings)")
            all_dataframes.append(df)

        except KeyError:
            print(f"[!] Error in {file_name}: missing required columns {required_columns}")
        except Exception as e:
            # Best-effort: report the failure and continue with remaining files.
            print(f"[!] Unable to process {file_name}: {e}")

    if all_dataframes:
        full_df = pd.concat(all_dataframes, ignore_index=True)
        full_df.to_parquet('full_chapters_dataset.parquet', index=False, engine='pyarrow')
        # Fixed message: it previously ended with the Russian word "строк"
        # ("rows") in an otherwise-English status line.
        print(f"\n[OK] Main file ready: full_chapters_dataset.parquet ({len(full_df)} rows)")
39
# Script entry point: run the JSONL-to-Parquet conversion when executed directly.
if __name__ == "__main__":
    make_parquets()
parquet/chap1_dataset.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c3ff5216e980b7bb16a3a5668a59962d49d05ab1ca46ab8feeae1fe4fa43a85
3
+ size 41761
parquet/chap2_dataset.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7538cba3da5c052ef740069388c107e27ea23df45b9dee4f9c3c6ff63af7017a
3
+ size 93340
parquet/chap3_dataset.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8bb2da9c1334ea49772d6780af93f846637d759a86fd2ebea84477a9f002fb
3
+ size 101400
parquet/chap4_dataset.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1514521fc62386d1f901a31b0b03fff915e986e088fbcffa6a0debcdb850786e
3
+ size 126713
parquet/full_chapters_dataset.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da4ddb1e4558eb7b05e2e280f63dcf2536645bac90044ef55a4a9c36c0f6f175
3
+ size 357298
readme.md CHANGED
@@ -9,16 +9,20 @@ dataset_info:
9
  dtype: string
10
  task_categories:
11
  - text-generation
12
- - conversation
13
  language:
14
  - en
15
  tags:
16
  - deltarune
 
 
 
 
17
  - game-script
18
  - dialogue
19
  - toby-fox
20
  - rpg
21
  - undertale
 
22
  size_categories:
23
  - 10K<n<100K
24
  pretty_name: "Deltarune Chapters 1-4 Transcript"
 
9
  dtype: string
10
  task_categories:
11
  - text-generation
 
12
  language:
13
  - en
14
  tags:
15
  - deltarune
16
+ - deltarune-chapter-1
17
+ - deltarune-chapter-2
18
+ - deltarune-chapter-3
19
+ - deltarune-chapter-4
20
  - game-script
21
  - dialogue
22
  - toby-fox
23
  - rpg
24
  - undertale
25
+
26
  size_categories:
27
  - 10K<n<100K
28
  pretty_name: "Deltarune Chapters 1-4 Transcript"