# Source: thomaskim1130 — "Upload 14 files (#9)", commit 7ee43dc (verified)
import json
import os

from langchain.text_splitter import MarkdownTextSplitter
from tqdm import tqdm
def read_jsonl(file_path):
    """Load a JSONL file and return its records as a list of parsed objects."""
    records = []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            records.append(json.loads(raw_line))
    return records
def save_to_jsonl(data, output_path):
    """Write each entry in *data* as one JSON line to *output_path* (UTF-8, non-ASCII kept)."""
    with open(output_path, 'w', encoding='utf-8') as fh:
        fh.writelines(
            json.dumps(record, ensure_ascii=False) + '\n' for record in data
        )
def process_with_markdown_splitter(file_path):
    """Read a JSONL corpus and re-render each entry's text as a markdown table row set.

    Each entry's ``text`` is split into overlapping chunks with langchain's
    ``MarkdownTextSplitter`` (512 chars, 50 overlap), the chunks are re-joined
    with blank lines, and the result is wrapped in a small markdown table
    alongside the entry's ``_id`` and ``title``.

    Args:
        file_path: Path to a JSONL file whose records contain
            ``_id``, ``title`` and ``text`` keys.

    Returns:
        List of dicts with keys ``_id``, ``title`` and ``text`` (markdown).
    """
    data = read_jsonl(file_path)
    splitter = MarkdownTextSplitter(chunk_size=512, chunk_overlap=50)

    processed = []
    for entry in data:
        content = entry['text']
        # Some records store text as a list; stringify so the splitter accepts it.
        if isinstance(content, list):
            content = str(content)
        _id = entry['_id']
        title = entry['title']

        chunks = splitter.split_text(content)
        # Each chunk is followed by a blank line (trailing blank line included,
        # matching the original output byte-for-byte).
        chunk_string = ''.join(chunk + '\n\n' for chunk in chunks)

        # NOTE(review): the text row deliberately has no closing "|" — kept as-is
        # since downstream consumers may depend on this exact format.
        markdown = (
            f"| _id | {_id} |\n"
            f"| title | {title} |\n"
            f"| text | {chunk_string}\n"
        )
        processed.append({"_id": _id, "title": title, "text": markdown})
    return processed
# Datasets to process; each has a corpus and a queries JSONL file.
names = ['ConvFinQA', 'FinanceBench', 'FinDER', 'FinQA', 'FinQABench', 'MultiHeirtt', 'TATQA']

for name in tqdm(names):
    for split in ('corpus', 'queries'):
        file_path = f'./{name}/{split}.jsonl'
        output_path = f'./processed_markdown/{name}/{split}.jsonl'
        # Create the output directory up front — open(..., 'w') does not
        # create intermediate directories and would raise FileNotFoundError.
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        result = process_with_markdown_splitter(file_path)
        save_to_jsonl(result, output_path)