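"""Split aligned old/new knowledge-graph triples into retrieval-style
paragraphs. For each subject entity, the old and new triple lists are chunked
at the same boundaries, so paragraph i of the old output corresponds to
paragraph i of the new output."""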
import os
import json
from collections import defaultdict
from transformers import LlamaTokenizer
from tqdm import tqdm
import argparse

def build_entity_triples(triples):
    """Group triples by subject: {subject: ["subj - relation - obj", ...]}."""
    entity_triples = defaultdict(list)
    for triple in triples:
        subj, relation, obj = triple
        # Build the full triple string
        triple_str = f"{subj} - {relation} - {obj}"
        entity_triples[subj].append(triple_str)
    return entity_triples
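
# A minimal sketch of the expected data flow (values are illustrative): an
# input list such as [["Paris", "capital of", "France"]] yields
# {"Paris": ["Paris - capital of - France"]}, assuming the input JSON is a
# list of [subject, relation, object] triples, as the unpacking above implies.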

def create_paragraphs_with_alignment(subj, old_triples_list, new_triples_list, tokenizer, max_tokens=512):
    """Split the old and new triple lists for one subject into index-aligned
    paragraphs. Boundaries are computed from the token counts of the old
    triples only and then applied to both lists, so paragraph i of the old
    output always corresponds to paragraph i of the new output (the new side
    may therefore exceed max_tokens).
    """
    paragraphs_old = []
    paragraphs_new = []
    boundaries = []  # indices that mark paragraph boundaries

    index = 0
    start_idx = 0
    while index < len(old_triples_list):
        # Count the tokens in the current candidate paragraph
        candidate_old_triples = old_triples_list[start_idx:index+1]
        paragraph_text_old = ', '.join(candidate_old_triples)
        tokens_in_paragraph = len(tokenizer.encode(paragraph_text_old, add_special_tokens=False))

        if tokens_in_paragraph > max_tokens:
            if index == start_idx:
                # A single triple alone exceeds the limit; force it into its own paragraph
                boundaries.append(index+1)
                start_idx = index+1
                index += 1
            else:
                boundaries.append(index)
                start_idx = index
        else:
            index += 1

    # Add the final boundary
    if start_idx < len(old_triples_list):
        boundaries.append(len(old_triples_list))

    # Split both lists into paragraphs along the same boundaries
    start = 0
    for end in boundaries:
        # Paragraph from the old triples
        old_paragraph_triples = old_triples_list[start:end]
        paragraph_text_old = ', '.join(old_paragraph_triples)
        paragraphs_old.append({
            "title": subj,
            "contents": paragraph_text_old
        })

        # Index-aligned paragraph from the new triples
        new_paragraph_triples = new_triples_list[start:end]
        paragraph_text_new = ', '.join(new_paragraph_triples)
        paragraphs_new.append({
            "title": subj,
            "contents": paragraph_text_new
        })

        start = end

    return paragraphs_old, paragraphs_new
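
# A hedged usage sketch (the tokenizer path and data are hypothetical):
#
#   tok = LlamaTokenizer.from_pretrained("path/to/llama-tokenizer")
#   old = [f"E - relation{i} - old_value{i}" for i in range(50)]
#   new = [f"E - relation{i} - new_value{i}" for i in range(50)]
#   p_old, p_new = create_paragraphs_with_alignment("E", old, new, tok, max_tokens=64)
#   assert len(p_old) == len(p_new)  # paragraphs stay index-aligned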

def main(args):
    # Create output directories if needed (dirname is empty for bare filenames)
    for output_file in (args.old_output_file, args.new_output_file):
        output_dir = os.path.dirname(output_file)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

    tokenizer = LlamaTokenizer.from_pretrained(args.model_path)

    with open(args.old_input_json_file, 'r') as f:
        old_triples = json.load(f)

    with open(args.new_input_json_file, 'r') as f:
        new_triples = json.load(f)

    old_entity_triples = build_entity_triples(old_triples)
    new_entity_triples = build_entity_triples(new_triples)

    # Process every entity that appears on either side
    all_paragraphs_old = []
    all_paragraphs_new = []
    entities = set(old_entity_triples.keys()).union(new_entity_triples.keys())

    for subj in tqdm(entities, desc="Processing entities", unit="entity"):
        old_triples_list = old_entity_triples.get(subj, [])
        new_triples_list = new_entity_triples.get(subj, [])

        # Pad with empty strings so the old and new triple lists have the same
        # length; this keeps the two sides index-aligned during chunking
        max_length = max(len(old_triples_list), len(new_triples_list))
        if len(old_triples_list) < max_length:
            old_triples_list.extend([''] * (max_length - len(old_triples_list)))
        if len(new_triples_list) < max_length:
            new_triples_list.extend([''] * (max_length - len(new_triples_list)))

        paragraphs_old, paragraphs_new = create_paragraphs_with_alignment(
            subj, old_triples_list, new_triples_list, tokenizer, args.max_tokens
        )
        all_paragraphs_old.extend(paragraphs_old)
        all_paragraphs_new.extend(paragraphs_new)

    with open(args.old_output_file, 'w') as out_f:
        json.dump(all_paragraphs_old, out_f, ensure_ascii=False, indent=4)

    with open(args.new_output_file, 'w') as out_f:
        json.dump(all_paragraphs_new, out_f, ensure_ascii=False, indent=4)
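
    # Each output file is a JSON list of records shaped like
    # {"title": "<subject>", "contents": "<subj> - <rel> - <obj>, ..."}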

    print(f"old {args.old_output_file}")
    print(f"new {args.new_output_file}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process old and new triples")
    parser.add_argument('--old_input_json_file', type=str, required=True, help='Path to the JSON file of old triples.')
    parser.add_argument('--new_input_json_file', type=str, required=True, help='Path to the JSON file of new triples.')
    parser.add_argument('--old_output_file', type=str, required=True, help='Full path of the output file for the processed old triples.')
    parser.add_argument('--new_output_file', type=str, required=True, help='Full path of the output file for the processed new triples.')
    parser.add_argument('--model_path', type=str, required=True, help='Path to the pretrained tokenizer model.')
    parser.add_argument('--max_tokens', type=int, default=512, help='Maximum number of tokens per paragraph.')

    args = parser.parse_args()
    main(args)
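
# Example invocation (script name and paths are illustrative):
#
#   python build_paragraphs.py \
#       --old_input_json_file data/old_triples.json \
#       --new_input_json_file data/new_triples.json \
#       --old_output_file out/old_paragraphs.json \
#       --new_output_file out/new_paragraphs.json \
#       --model_path path/to/llama-tokenizer \
#       --max_tokens 512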