File size: 1,719 Bytes
b5601c5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
from pathlib import Path
import pandas as pd
from tqdm import tqdm
from project_settings import project_path
def get_args():
    """Parse command-line arguments for the weibo-400w conversion script.

    Returns:
        argparse.Namespace with:
            data_dir (str): directory holding the raw post/response files.
            output_file (str): destination path for the JSONL output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        default=(project_path / "original_data/weibo-400w").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_file",
        # .as_posix() keeps the default a str, consistent with type=str and
        # with --data_dir above (the original passed a raw Path object, so
        # args.output_file was str or Path depending on how it was supplied).
        default=(project_path / "data/weibo.jsonl").as_posix(),
        type=str
    )
    args = parser.parse_args()
    return args
def main():
args = get_args()
data_dir = Path(args.data_dir)
questions = list()
with open(data_dir / "stc_weibo_train_post", "r", encoding="utf-8") as f:
for row in f:
row = str(row).strip()
row = "".join(row.split())
questions.append(row)
answers = list()
with open(data_dir / "stc_weibo_train_response.part", "r", encoding="utf-8") as f:
for row in f:
row = str(row).strip()
row = "".join(row.split())
answers.append(row)
if len(questions) != len(answers):
raise AssertionError
with open(args.output_file, "w", encoding="utf-8") as f:
for question, answer in tqdm(zip(questions, answers)):
row = {
"conversation": [
{"role": "human", "message": question},
{"role": "assistant", "message": answer},
],
"category": None,
"data_source": "weibo",
}
row = json.dumps(row, ensure_ascii=False)
f.write("{}\n".format(row))
return
# Script entry point: run the conversion when executed directly.
if __name__ == "__main__":
    main()
|