import json
import os

from langchain.text_splitter import MarkdownTextSplitter
from tqdm import tqdm

def read_jsonl(file_path):
    """Load a JSONL file and return its records as a list of parsed objects."""
    records = []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            records.append(json.loads(raw_line))
    return records
    
def save_to_jsonl(data, output_path):
    """Serialize each entry of *data* as one JSON object per line at *output_path*.

    Non-ASCII characters are written verbatim (``ensure_ascii=False``).
    """
    with open(output_path, 'w', encoding='utf-8') as fh:
        fh.writelines(
            json.dumps(entry, ensure_ascii=False) + '\n' for entry in data
        )
            
def process_with_markdown_splitter(file_path):
    """Re-chunk a JSONL corpus/query file and render each record as Markdown.

    Each record's ``text`` is split with LangChain's ``MarkdownTextSplitter``
    (512-char chunks, 50-char overlap), re-joined with blank-line separators,
    and embedded in a small Markdown table together with ``_id`` and ``title``.

    Parameters
    ----------
    file_path : str
        Path to a JSONL file whose records carry ``_id``, ``title`` and
        ``text`` keys.

    Returns
    -------
    list[dict]
        One dict per input record with the same ``_id``/``title`` and the
        rebuilt Markdown string under ``text``.
    """
    data = read_jsonl(file_path)

    chunk_size = 512
    chunk_overlap = 50
    splitter = MarkdownTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

    processed = []
    for entry in data:
        content = entry['text']
        # Some datasets store the text as a list of segments; fall back to
        # its string representation so the splitter always receives a str.
        if isinstance(content, list):
            content = str(content)
        _id = entry['_id']
        title = entry['title']

        # Join chunks with blank-line separators; a trailing separator
        # follows the last chunk, matching the original output layout.
        chunks = splitter.split_text(content)
        chunk_string = ''.join(chunk + '\n\n' for chunk in chunks)

        markdown = (
            f"| _id | {_id} |\n"
            f"| title | {title} |\n"
            f"| text | {chunk_string}\n"
        )
        processed.append({"_id": _id, "title": title, "text": markdown})

    return processed

# Datasets to convert; each directory holds a corpus and a queries JSONL file.
names = ['ConvFinQA', 'FinanceBench', 'FinDER', 'FinQA', 'FinQABench', 'MultiHeirtt', 'TATQA']

for name in tqdm(names):
    for d in ['corpus', 'queries']:
        file_path = f'./{name}/{d}.jsonl'
        output_path = f'./processed_markdown/{name}/{d}.jsonl'

        # Create the destination directory up front so the write cannot
        # fail with FileNotFoundError on a fresh checkout.
        os.makedirs(os.path.dirname(output_path), exist_ok=True)

        result = process_with_markdown_splitter(file_path)
        save_to_jsonl(result, output_path)