Datasets:
Tasks:
Translation
Modalities:
Text
Formats:
json
Languages:
English
Size:
10K - 100K
Tags:
code
License:
Simon Strandgaard
committed on
Commit
·
b5f2434
1
Parent(s):
6fee525
Here is my project.
Browse files- README.md +59 -3
- generate_dataset.py +104 -0
- random_data.py +11 -0
- test_random_data.py +21 -0
README.md
CHANGED
|
@@ -1,3 +1,59 @@
|
|
| 1 |
-
---
|
| 2 |
-
license: mit
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
task_categories:
|
| 4 |
+
- translation
|
| 5 |
+
language:
|
| 6 |
+
- en
|
| 7 |
+
tags:
|
| 8 |
+
- code
|
| 9 |
+
pretty_name: Base64 encode version1
|
| 10 |
+
size_categories:
|
| 11 |
+
- 10K<n<100K
|
| 12 |
+
configs:
|
| 13 |
+
- config_name: default
|
| 14 |
+
data_files:
|
| 15 |
+
- split: train
|
| 16 |
+
path: data/data.jsonl
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+
# Dataset: Base64 encode version1
|
| 20 |
+
|
| 21 |
+
This dataset is for improving base64 encoding capabilities.
|
| 22 |
+
|
| 23 |
+
`GPT 4o` is great at base64 encoding.
|
| 24 |
+
|
| 25 |
+
```
|
| 26 |
+
user:
|
| 27 |
+
convert this hex data to base64:
|
| 28 |
+
880567a1
|
| 29 |
+
|
| 30 |
+
assistant:
|
| 31 |
+
The base64 encoding of the hex data `880567a1` is `iAVnoQ==`.
|
| 32 |
+
|
| 33 |
+
user:
|
| 34 |
+
convert this json data representing a byte sequence to base64:
|
| 35 |
+
[30,41,183]
|
| 36 |
+
|
| 37 |
+
assistant:
|
| 38 |
+
The base64 encoding of the JSON data `[30,41,183]` is `Him3`.
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
However `llama3` is terrible at base64 encoding.
|
| 42 |
+
|
| 43 |
+
Short examples of what `data.jsonl` looks like:
|
| 44 |
+
|
| 45 |
+
```text
|
| 46 |
+
{"instruction": "Encode hex to Base64", "input": "ecfc2db9ba6049165b", "output": "7PwtubpgSRZb"}
|
| 47 |
+
{"instruction": "change HEX to base64", "input": "60926e782008", "output": "YJJueCAI"}
|
| 48 |
+
{"instruction": "Json to base64", "input": "[77,62,160,64,248,233,105,133,5,248,89,239]", "output": "TT6gQPjpaYUF+Fnv"}
|
| 49 |
+
{"instruction": "Change Json to BASE64", "input": "[10,59,42,251,112,1]", "output": "Cjsq+3AB"}
|
| 50 |
+
{"instruction": "Convert JSON to Base64", "input": "[236,201,129,100,238]", "output": "7MmBZO4="}
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
# Generate dataset
|
| 54 |
+
|
| 55 |
+
```
|
| 56 |
+
PROMPT> python generate_dataset.py
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
This creates the `data.jsonl` file.
|
generate_dataset.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from random_data import generate_random_byte_array
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import random
|
| 5 |
+
import base64
|
| 6 |
+
|
def base64_encode_byte_array(byte_array):
    """Return the standard Base64 text encoding of *byte_array* as a str."""
    encoded = base64.b64encode(byte_array)
    return encoded.decode('utf-8')
def generate_dataset_item(seed):
    """Create one instruction/input/output training sample.

    The byte payload, the input representation (hex or JSON), and the
    instruction wording are all derived deterministically from *seed*,
    so the same seed always yields the same item.

    Returns a dict with 'instruction', 'input' and 'output' keys, where
    'output' is the Base64 encoding of the payload.
    """
    # Independent sub-seeds keep each random choice decoupled from the others.
    length = random.Random(seed + 1000).randint(0, 127)
    byte_array = generate_random_byte_array(length=length, seed=seed + 1001)

    input_formats = [
        'hex',
        'json'
    ]
    input_format = random.Random(seed + 1002).choice(input_formats)

    names_hex = [
        'Hexadecimal',
        'hexadecimal',
        'hex',
        'Hex',
        'HEX',
    ]
    names_json = [
        'Json',
        'json',
        'JSON',
    ]

    name_input = None
    if input_format == 'hex':
        name_input = random.Random(seed + 1003).choice(names_hex)
    elif input_format == 'json':
        name_input = random.Random(seed + 1004).choice(names_json)

    name_outputs = [
        'base64',
        'Base64',
        'BASE64',
    ]
    name_output = random.Random(seed + 1005).choice(name_outputs)

    instructions = [
        f'Encode {name_input} to {name_output}',
        f'encode {name_input} to {name_output}',
        f'convert {name_input} to {name_output}',
        f'Convert {name_input} to {name_output}',
        f'Transform {name_input} to {name_output}',
        f'transform {name_input} to {name_output}',
        f'Change {name_input} to {name_output}',
        f'change {name_input} to {name_output}',
        f'{name_input} to {name_output}',
        f'{name_output} from {name_input}',
    ]

    instruction = random.Random(seed + 1006).choice(instructions)

    output = base64_encode_byte_array(byte_array)

    # Render the payload in the chosen textual representation.
    # (renamed from `input`/`dict` to avoid shadowing the builtins)
    input_value = None
    if input_format == 'hex':
        input_value = byte_array.hex()
    elif input_format == 'json':
        input_value = json.dumps(list(byte_array), separators=(',', ':'))

    result = {
        'instruction': instruction,
        'input': input_value,
        'output': output
    }
    return result
| 77 |
+
|
def generate_dataset(max_num_samples=1000, max_byte_size=1024*1024, seed_start=500000000):
    """Generate up to *max_num_samples* items within a total byte budget.

    Items are produced from consecutive seeds starting at *seed_start*, so
    the whole dataset is deterministic. Generation stops early when adding
    the next item's serialized JSON size would exceed *max_byte_size*.

    Returns a list of item dicts.
    """
    dataset = []
    dataset_byte_size = 0
    for i in range(max_num_samples):
        item = generate_dataset_item(seed_start + i)
        # Size of this item once serialized (renamed from `bytes`,
        # which shadowed the builtin).
        item_byte_size = len(json.dumps(item))
        if dataset_byte_size + item_byte_size > max_byte_size:
            break
        dataset_byte_size += item_byte_size
        dataset.append(item)
    return dataset
dataset = generate_dataset(
    max_num_samples=50000,
    max_byte_size=1024*1024*20,
)

# Save dataset to file, one JSON object per line (JSON Lines format).
filename = 'data.jsonl'
with open(filename, 'w') as f:
    for item in dataset:
        f.write(json.dumps(item) + '\n')

# Summary — report the actual destination file, not a placeholder.
file_size = os.path.getsize(filename)
print(f"Generated {len(dataset)} samples, saved to {filename}, file size: {file_size} bytes.")
random_data.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
def generate_random_byte_array(length, seed):
    """Return a deterministic pseudo-random bytearray of *length* bytes.

    Uses a private random.Random instance instead of random.seed(), so the
    module-global RNG state is left untouched. The byte sequence for a
    given (length, seed) pair is identical to the previous implementation,
    since both seed the same Mersenne Twister generator.
    """
    rng = random.Random(seed)
    return bytearray(rng.randint(0, 255) for _ in range(length))
if __name__ == "__main__":
    # Quick manual check: print a small deterministic sample.
    demo_length = 10
    demo_seed = 42
    demo_bytes = generate_random_byte_array(demo_length, demo_seed)
    print(demo_bytes)
test_random_data.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
from random_data import generate_random_byte_array
|
| 3 |
+
|
class TestRandomData(unittest.TestCase):
    """Unit tests pinning the deterministic output of generate_random_byte_array."""

    def test_generate_random_byte_array_empty(self):
        # A zero-length request yields an empty bytearray.
        result = generate_random_byte_array(length=0, seed=0)
        self.assertEqual(bytearray(), result)

    def test_generate_random_byte_array_length1(self):
        # Seed 0 always produces 0xc5 as the first byte.
        result = generate_random_byte_array(length=1, seed=0)
        self.assertEqual(bytearray(b'\xc5'), result)

    def test_generate_random_byte_array_length2(self):
        # Seed 0 always produces 0xc5, 0xd7 as the first two bytes.
        result = generate_random_byte_array(length=2, seed=0)
        self.assertEqual(bytearray(b'\xc5\xd7'), result)
if __name__ == '__main__':
    # Allow running the tests directly via `python test_random_data.py`.
    unittest.main()