"""
Convert the HuggingFace dataset Volko76/french-classic-conversations to JSONL
format, where each line holds a ``messages`` list with a system prompt
prepended to the conversation turns.
"""
|
|
|
| from datasets import load_dataset
|
| import json
|
| import os
|
|
|
|
|
# System prompt prepended as the first message of every conversation.
SYSTEM_MESSAGE = "You are a helpful assistant."

# Destination file for the converted dataset (one JSON object per line).
OUTPUT_FILE = "french_classic_conversations.jsonl"
|
|
|
def _write_jsonl(rows, path):
    """Write each row's conversation to *path* as JSON Lines.

    Every output line is ``{"messages": [...]}`` with SYSTEM_MESSAGE
    prepended as the first turn. Returns the number of rows written,
    which by construction equals the number of lines in the file.
    """
    count = 0
    with open(path, 'w', encoding='utf-8') as f:
        for row in rows:
            conversations = row['conversations']
            # Some dataset exports store the turn list as a JSON-encoded
            # string rather than a parsed list; normalize it.
            if isinstance(conversations, str):
                conversations = json.loads(conversations)

            messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
            for msg in conversations:
                messages.append({
                    "role": msg['role'],
                    "content": msg['content']
                })

            json_line = json.dumps({"messages": messages}, ensure_ascii=False)
            f.write(json_line + '\n')
            count += 1
    return count


def _preview(path):
    """Pretty-print the first two entries of the JSONL file, truncated."""
    print("\n" + "="*60)
    print("First 2 entries from the output file:\n")
    with open(path, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            if i >= 2:
                break
            data = json.loads(line)
            print(f"Entry {i+1}:")
            # Truncate to keep console output readable on long conversations.
            print(json.dumps(data, indent=2, ensure_ascii=False)[:1000])
            print("\n" + "-"*40 + "\n")


def main():
    """Download the dataset, convert it to JSONL, then preview and summarize."""
    print("Loading dataset from HuggingFace...")
    dataset = load_dataset("Volko76/french-classic-conversations")
    train = dataset['train']
    print(f"Dataset loaded: {len(train)} rows")
    print(f"Columns: {train.column_names}")

    # Show one raw row so the source schema is visible in the logs.
    sample = train[0]
    print("\nSample row structure:")
    print(str(sample)[:1000])

    print("\nConverting to JSONL format...")
    # Count rows while writing instead of re-reading the whole output
    # file afterward just to count its lines.
    total_lines = _write_jsonl(train, OUTPUT_FILE)
    print(f"Conversion complete! Output saved to: {OUTPUT_FILE}")

    _preview(OUTPUT_FILE)

    file_size = os.path.getsize(OUTPUT_FILE) / (1024 * 1024)
    print("="*60)
    print(f"Total conversations: {total_lines}")
    print(f"File size: {file_size:.2f} MB")
|
# Run the conversion only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
|
|