"""Load .jsonl files from a folder, split them into train/validation sets,
and push the result to the Hugging Face Hub as a DatasetDict."""

import json
import os
import random

from datasets import Dataset, DatasetDict

# Configuration
input_folder = "./Raw"  # Path to the folder containing .jsonl files
train_ratio = 0.8  # 80% for training, 20% for validation
output_repo = "BXYMartin/OpenHearthstoneLLM"  # Target Hugging Face dataset repository
shuffle_seed = 42  # Fixed seed so the train/validation split is reproducible across runs

# Load all .jsonl files from the folder.
# Every value is stringified so that rows with heterogeneous value types
# still share a single consistent schema in the resulting Dataset.
data = []
for filename in os.listdir(input_folder):
    if filename.endswith(".jsonl"):
        with open(os.path.join(input_folder, filename), "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Skip blank lines (e.g. a trailing newline), which
                    # json.loads would otherwise reject with a JSONDecodeError.
                    continue
                blob = json.loads(line)
                entry = {key: str(value) for key, value in blob.items()}
                data.append(entry)

# Shuffle deterministically: an unseeded shuffle would make the split
# different on every run, so the published dataset would not be reproducible.
random.Random(shuffle_seed).shuffle(data)

# Split the data into train and validation sets
train_size = int(len(data) * train_ratio)
train_data = data[:train_size]
val_data = data[train_size:]

# Convert to Hugging Face Datasets and bundle them into a DatasetDict
dataset_dict = DatasetDict({
    "train": Dataset.from_list(train_data),
    "validation": Dataset.from_list(val_data),
})

# Push to Hugging Face Hub (requires prior authentication, e.g. `huggingface-cli login`)
dataset_dict.push_to_hub(output_repo)
print(f"Dataset pushed to Hugging Face Hub: {output_repo}")