update readme
README.md CHANGED
@@ -13,7 +13,7 @@ size_categories:
## Introduction

-Web novels collected from the internet, then cleaned and split, for training large language models. About 9,000 books in total, roughly
+Web novels collected from the internet, then cleaned and split, for training large language models. About 9,000 books in total, roughly 9B tokens.

## Usage
@@ -32,34 +32,6 @@ size_categories:
```

-### Example code
-
-```python
-import json
-import os
-
-import numpy as np
-
-
-def process_webnovel(input_dir, tokenizer):
-    for subdir, dirs, files in os.walk(input_dir):
-        all_tokens = []
-        for idx, file in enumerate(files):
-            # Only process .jsonl files
-            if file.endswith('.jsonl'):
-                # Absolute path of the current file
-                file_path = os.path.join(subdir, file)
-                # Read the jsonl file
-                with open(file_path, 'r', encoding='utf-8') as infile:
-                    lines = infile.readlines()
-
-                for line in lines:
-                    json_obj = json.loads(line)  # parse the JSON string into a Python object
-                    text = json_obj['text']
-                    tokens = tokenizer.encode(text, add_special_tokens=False)
-                    tokens.append(tokenizer.special_tokens['<eos>'])
-                    if len(tokens) > 5:
-                        all_tokens += tokens
-
-        # Dump the accumulated token ids of this directory as a uint16 binary shard
-        arr = np.array(all_tokens, dtype=np.uint16)
-        with open(f'./data/webnovel_{idx}.bin', 'wb') as f:
-            f.write(arr.tobytes())
-```
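For context, a minimal sketch of how the preprocessing function above might be driven end to end. The tokenizer name, the input directory layout, and the `special_tokens` shim are assumptions rather than part of this dataset's tooling; the function only requires an object exposing `encode(text, add_special_tokens=False)` and a `special_tokens['<eos>']` id.

```python
import os

from transformers import AutoTokenizer  # assumed; any tokenizer with a compatible interface works

# Hypothetical locations; adjust to wherever the *.jsonl shards were downloaded.
INPUT_DIR = './data/webnovel'
os.makedirs('./data', exist_ok=True)  # process_webnovel writes ./data/webnovel_{idx}.bin

tokenizer = AutoTokenizer.from_pretrained('your-tokenizer-name')  # placeholder model id

# process_webnovel indexes tokenizer.special_tokens['<eos>']; standard Hugging Face
# tokenizers do not expose that mapping, so provide one from eos_token_id as a shim.
if not hasattr(tokenizer, 'special_tokens'):
    tokenizer.special_tokens = {'<eos>': tokenizer.eos_token_id}

process_webnovel(INPUT_DIR, tokenizer)
```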
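Since each shard is just a flat array of uint16 token ids written with `tobytes()`, reading one back for training needs no extra framing; a small sketch, with the shard name (`webnovel_0.bin`) purely illustrative:

```python
import numpy as np

# Memory-map one shard produced by process_webnovel; token ids come back as uint16.
tokens = np.memmap('./data/webnovel_0.bin', dtype=np.uint16, mode='r')
print(tokens.shape, tokens[:16])
```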