Volko76 committed on
Commit
ee6fa55
·
verified ·
1 Parent(s): 80b5b70

Added to jsonl

Browse files
.gitattributes CHANGED
@@ -60,3 +60,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
60
  sharegpt_format.json filter=lfs diff=lfs merge=lfs -text
61
  chatml_format.jsonl filter=lfs diff=lfs merge=lfs -text
62
  alpaca_format.json filter=lfs diff=lfs merge=lfs -text
 
 
60
  sharegpt_format.json filter=lfs diff=lfs merge=lfs -text
61
  chatml_format.jsonl filter=lfs diff=lfs merge=lfs -text
62
  alpaca_format.json filter=lfs diff=lfs merge=lfs -text
63
+ french_classic_conversations.jsonl filter=lfs diff=lfs merge=lfs -text
convert_to_jsonl.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Convert HuggingFace dataset Volko76/french-classic-conversations to JSONL format
3
+ with messages structure including system prompt.
4
+ """
5
+
6
+ from datasets import load_dataset
7
+ import json
8
+ import os
9
+
10
+ # Configuration
11
+ SYSTEM_MESSAGE = "You are a helpful assistant."
12
+ OUTPUT_FILE = "french_classic_conversations.jsonl"
13
+
14
+ def main():
15
+ # Load the dataset from HuggingFace
16
+ print("Loading dataset from HuggingFace...")
17
+ dataset = load_dataset("Volko76/french-classic-conversations")
18
+ print(f"Dataset loaded: {len(dataset['train'])} rows")
19
+ print(f"Columns: {dataset['train'].column_names}")
20
+
21
+ # Check the structure of the first row
22
+ sample = dataset['train'][0]
23
+ print("\nSample row structure:")
24
+ print(str(sample)[:1000])
25
+
26
+ # Convert to JSONL format with system message
27
+ print(f"\nConverting to JSONL format...")
28
+
29
+ with open(OUTPUT_FILE, 'w', encoding='utf-8') as f:
30
+ for row in dataset['train']:
31
+ # Get the conversations from the row - it might be a JSON string
32
+ conversations = row['conversations']
33
+ if isinstance(conversations, str):
34
+ conversations = json.loads(conversations)
35
+
36
+ # Create the messages list with system prompt first
37
+ messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
38
+
39
+ # Add the conversation messages
40
+ for msg in conversations:
41
+ messages.append({
42
+ "role": msg['role'],
43
+ "content": msg['content']
44
+ })
45
+
46
+ # Write as JSONL
47
+ json_line = json.dumps({"messages": messages}, ensure_ascii=False)
48
+ f.write(json_line + '\n')
49
+
50
+ print(f"Conversion complete! Output saved to: {OUTPUT_FILE}")
51
+
52
+ # Verify the output - read and display first few lines
53
+ print("\n" + "="*60)
54
+ print("First 2 entries from the output file:\n")
55
+ with open(OUTPUT_FILE, 'r', encoding='utf-8') as f:
56
+ for i, line in enumerate(f):
57
+ if i >= 2:
58
+ break
59
+ data = json.loads(line)
60
+ print(f"Entry {i+1}:")
61
+ print(json.dumps(data, indent=2, ensure_ascii=False)[:1000])
62
+ print("\n" + "-"*40 + "\n")
63
+
64
+ # Count total entries and file size
65
+ with open(OUTPUT_FILE, 'r', encoding='utf-8') as f:
66
+ total_lines = sum(1 for _ in f)
67
+
68
+ file_size = os.path.getsize(OUTPUT_FILE) / (1024 * 1024) # MB
69
+
70
+ print("="*60)
71
+ print(f"Total conversations: {total_lines}")
72
+ print(f"File size: {file_size:.2f} MB")
73
+
74
+ if __name__ == "__main__":
75
+ main()
french_classic_conversations.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ba69acf9bc82296b9847036148189b80d4dfa9aeb1baa7d0a76c3680c33aef6
3
+ size 130711044
to_jsonl.ipynb ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "eb317b63",
6
+ "metadata": {},
7
+ "source": [
8
+ "# Convert French Classic Conversations to JSONL Format\n",
9
+ "\n",
10
+ "This notebook converts the HuggingFace dataset `Volko76/french-classic-conversations` to a JSONL file with the messages format:\n",
11
+ "```json\n",
12
+ "{\n",
13
+ " \"messages\": [\n",
14
+ " { \"role\": \"system\", \"content\": \"You are a helpful assistant.\" },\n",
15
+ " { \"role\": \"user\", \"content\": \"...\" },\n",
16
+ " { \"role\": \"assistant\", \"content\": \"...\" }\n",
17
+ " ]\n",
18
+ "}\n",
19
+ "```"
20
+ ]
21
+ },
22
+ {
23
+ "cell_type": "code",
24
+ "execution_count": null,
25
+ "id": "631ee57f",
26
+ "metadata": {},
27
+ "outputs": [],
28
+ "source": [
29
+ "from datasets import load_dataset\n",
30
+ "import json\n",
31
+ "\n",
32
+ "# Load the dataset from HuggingFace\n",
33
+ "dataset = load_dataset(\"Volko76/french-classic-conversations\")\n",
34
+ "print(f\"Dataset loaded: {len(dataset['train'])} rows\")\n",
35
+ "print(f\"Columns: {dataset['train'].column_names}\")"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "code",
40
+ "execution_count": null,
41
+ "id": "7382896f",
42
+ "metadata": {},
43
+ "outputs": [],
44
+ "source": [
45
+ "# Check the structure of the first row\n",
46
+ "sample = dataset['train'][0]\n",
47
+ "print(\"Sample row:\")\n",
48
+ "print(json.dumps(sample, indent=2, ensure_ascii=False)[:1000])"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": null,
54
+ "id": "11ed5944",
55
+ "metadata": {},
56
+ "outputs": [],
57
+ "source": [
58
+ "# Convert to JSONL format with system message\n",
59
+ "SYSTEM_MESSAGE = \"You are a helpful assistant.\"\n",
60
+ "output_file = \"french_classic_conversations.jsonl\"\n",
61
+ "\n",
62
+ "with open(output_file, 'w', encoding='utf-8') as f:\n",
63
+ " for row in dataset['train']:\n",
64
+ " # Get the conversations from the row\n",
65
+ " conversations = row['conversations']\n",
66
+ " \n",
67
+ " # Create the messages list with system prompt first\n",
68
+ " messages = [{\"role\": \"system\", \"content\": SYSTEM_MESSAGE}]\n",
69
+ " \n",
70
+ " # Add the conversation messages\n",
71
+ " for msg in conversations:\n",
72
+ " messages.append({\n",
73
+ " \"role\": msg['role'],\n",
74
+ " \"content\": msg['content']\n",
75
+ " })\n",
76
+ " \n",
77
+ " # Write as JSONL\n",
78
+ " json_line = json.dumps({\"messages\": messages}, ensure_ascii=False)\n",
79
+ " f.write(json_line + '\\n')\n",
80
+ "\n",
81
+ "print(f\"Conversion complete! Output saved to: {output_file}\")"
82
+ ]
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "execution_count": null,
87
+ "id": "b2db3191",
88
+ "metadata": {},
89
+ "outputs": [],
90
+ "source": [
91
+ "# Verify the output - read and display first few lines\n",
92
+ "print(\"First 3 entries from the output file:\\n\")\n",
93
+ "with open(output_file, 'r', encoding='utf-8') as f:\n",
94
+ " for i, line in enumerate(f):\n",
95
+ " if i >= 3:\n",
96
+ " break\n",
97
+ " data = json.loads(line)\n",
98
+ " print(f\"Entry {i+1}:\")\n",
99
+ " print(json.dumps(data, indent=2, ensure_ascii=False)[:800])\n",
100
+ " print(\"\\n\" + \"=\"*50 + \"\\n\")"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": null,
106
+ "id": "25aa1a17",
107
+ "metadata": {},
108
+ "outputs": [],
109
+ "source": [
110
+ "# Count total entries and file size\n",
111
+ "import os\n",
112
+ "\n",
113
+ "with open(output_file, 'r', encoding='utf-8') as f:\n",
114
+ " total_lines = sum(1 for _ in f)\n",
115
+ "\n",
116
+ "file_size = os.path.getsize(output_file) / (1024 * 1024) # MB\n",
117
+ "\n",
118
+ "print(f\"Total conversations: {total_lines}\")\n",
119
+ "print(f\"File size: {file_size:.2f} MB\")"
120
+ ]
121
+ }
122
+ ],
123
+ "metadata": {
124
+ "kernelspec": {
125
+ "display_name": "Python 3 (ipykernel)",
126
+ "language": "python",
127
+ "name": "python3"
128
+ }
129
+ },
130
+ "nbformat": 4,
131
+ "nbformat_minor": 5
132
+ }