import os
import json
import random
from datasets import Dataset, DatasetDict

# Configuration
input_folder = "./Raw"  # Replace with the path to your folder containing .jsonl files
train_ratio = 0.8  # 80% for training, 20% for validation
output_repo = "BXYMartin/OpenHearthstoneLLM"  # Replace with your Hugging Face dataset repository name


def load_jsonl_records(folder):
    """Load every record from all .jsonl files in *folder*.

    Each JSON value is coerced to str so all rows share a uniform string
    schema, which keeps the Arrow type inference in Dataset.from_list
    from failing on mixed-type columns.
    """
    records = []
    # sorted(): os.listdir order is arbitrary and platform-dependent, so
    # sort to make the pre-shuffle record order deterministic.
    for filename in sorted(os.listdir(folder)):
        if not filename.endswith(".jsonl"):
            continue
        with open(os.path.join(folder, filename), "r", encoding="utf-8") as f:
            for line in f:
                # Skip blank lines (e.g. a trailing newline at EOF) that
                # would otherwise make json.loads raise.
                if not line.strip():
                    continue
                blob = json.loads(line)
                records.append({key: str(value) for key, value in blob.items()})
    return records


def split_records(records, ratio):
    """Shuffle *records* in place and return a (train, validation) split.

    ``ratio`` is the fraction of records assigned to the training split.
    """
    random.shuffle(records)
    train_size = int(len(records) * ratio)
    return records[:train_size], records[train_size:]


def main():
    """Build a train/validation DatasetDict from .jsonl files and push it to the Hub."""
    train_data, val_data = split_records(load_jsonl_records(input_folder), train_ratio)

    dataset_dict = DatasetDict({
        "train": Dataset.from_list(train_data),
        "validation": Dataset.from_list(val_data),
    })

    # Push to Hugging Face Hub (requires the user to be authenticated,
    # e.g. via `huggingface-cli login`).
    dataset_dict.push_to_hub(output_repo)

    print(f"Dataset pushed to Hugging Face Hub: {output_repo}")


# Guard so importing this module does not trigger a network push.
if __name__ == "__main__":
    main()