File size: 3,229 Bytes
4ac1fc5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import os
import json

def save_to_json(data, filename):
    """Serialize *data* to *filename* as pretty-printed UTF-8 JSON.

    ``ensure_ascii=False`` keeps non-ASCII text (e.g. Chinese) readable
    in the output file instead of escaping it.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    # Bug fix: this line previously printed the literal "(unknown)"
    # instead of the destination path (cf. load_json's symmetric log).
    print(f"save to {filename}, data len: {len(data)}")
def load_json(file_path):
    """Load and return the parsed contents of a UTF-8 JSON file.

    Logs the source path and the number of top-level entries read.
    """
    with open(file_path, "r", encoding="utf-8") as handle:
        data = json.load(handle)
    print(f"load from {file_path}, data len: {len(data)}")
    return data


# Input/output paths.
input_folder = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits"
# output_folder = "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/code/graph"

# # Create the output folder
# os.makedirs(output_folder, exist_ok=True)

# Per-hop buckets — kept for the disabled per-hop export below; unused otherwise.
classified_data = {
    "1-hop": [],
    "2-hop": [],
    "3-hop": [],
    "4-hop": [],
    "5-hop": [],
    "other": []
}

new_data = []
# Scan every tagged split file in deterministic (sorted) order.
for filename in sorted(os.listdir(input_folder)):
    # Skip the merged-output subfolder; it is consumed separately below.
    if filename == "tagged_domain_keypoints":
        continue
    # Only process the tagged JSON splits.
    if not ("tagged" in filename and filename.endswith(".json")):
        continue
    file_path = os.path.join(input_folder, filename)
    print(filename)
    # Read one split file.
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    for item in data:
        # Isolate the reasoning-graph text: everything after the final
        # "</think>" / "Graph:" markers and before the "Area:" section.
        tag_qwq = item.get("tag_qwq", "")
        reasoning_graph = tag_qwq.split("</think>")[-1].split("Graph:")[-1].split("Area:")[0].strip()

        # Tag the item with its hop count. The first "<k>-hop" marker
        # found (checked in ascending order, matching the original
        # if/elif chain) wins; -1 means no marker was found.
        item["hop"] = -1
        for k in (1, 2, 3, 4, 5):
            if f"{k}-hop" in reasoning_graph:
                item["hop"] = k
                break
        new_data.append(item)
        # Per-hop bucketing into classified_data is disabled.

# Per-hop export, currently disabled:
# for hop, items in classified_data.items():
#     output_file = os.path.join(output_folder, f"{hop}.json")
#     with open(output_file, "w", encoding="utf-8") as f:
#         json.dump(items, f, ensure_ascii=False, indent=4)
#     print(f"Saved {len(items)} items to {output_file}")

data_1 = load_json("/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/merged_tagged_domain_keypoints_keywords_count.json")

# The tagged splits must line up one-to-one with the merged keyword-count
# file; copy each item's hop tag across once alignment is confirmed.
assert len(new_data) == len(data_1)
for item, item_1 in zip(new_data, data_1):
    assert item["idx"] == item_1["idx"], f"{item['idx']} {item_1['idx']}"
    assert item["Question"] == item_1["Question"]

    item_1["hop"] = item["hop"]

save_to_json(data_1, "/share/project/sunshuang/deep_search/data_syn/data/mixed_data/splits/tagged_domain_keypoints/merged_tagged_domain_keypoints_keywords_count_hop.json")