Datasets:
Tasks:
Text Generation
Modalities:
Text
Formats:
text
Languages:
English
Size:
10K - 100K
Tags:
deltarune
deltarune-chapter-1
deltarune-chapter-2
deltarune-chapter-3
deltarune-chapter-4
game-script
License:
File size: 1,330 Bytes
import pandas as pd
import glob
import os
def make_parquets():
    """Convert chapter JSONL dialogue files to Parquet.

    Finds every ``chap*_dataset.jsonl`` in the current directory, keeps only
    the required columns, writes a per-chapter ``.parquet`` next to each
    source file, and finally concatenates all successfully converted chapters
    into ``full_chapters_dataset.parquet``.

    Side effects: reads/writes files in the current working directory and
    prints a status line per file. Returns None.
    """
    # sorted() makes processing (and thus the row order of the combined
    # parquet) deterministic — glob order is filesystem-dependent.
    jsonl_files = sorted(glob.glob('chap*_dataset.jsonl'))
    if not jsonl_files:
        print("[-] Files chap*_dataset.jsonl Not found in current dir")
        return

    all_dataframes = []
    required_columns = ['context', 'speaker', 'text']
    print(f"[*] Found files: {len(jsonl_files)}")

    for file_name in jsonl_files:
        try:
            df = pd.read_json(file_name, lines=True)
            # Raises KeyError if any required column is missing.
            df = df[required_columns]
            output_name = file_name.replace('.jsonl', '.parquet')
            df.to_parquet(output_name, index=False, engine='pyarrow')
            print(f"[+] Created: {output_name} ({len(df)} rows)")
            all_dataframes.append(df)
        except KeyError:
            print(f"[!] Error in {file_name}: missing required columns {required_columns}")
        except Exception as e:
            # Best-effort: a broken chapter file must not stop the others.
            print(f"[!] Unable to process {file_name}: {e}")

    if all_dataframes:
        full_df = pd.concat(all_dataframes, ignore_index=True)
        full_df.to_parquet('full_chapters_dataset.parquet', index=False, engine='pyarrow')
        # Fixed: message previously ended in Russian "строк" ("rows"),
        # inconsistent with the English output everywhere else.
        print(f"\n[OK] Main file ready: full_chapters_dataset.parquet ({len(full_df)} rows)")
# Run the conversion only when executed as a script, not on import.
if __name__ == "__main__":
    make_parquets()