|
|
import json |
|
|
import random |
|
|
|
|
|
|
|
|
def load_json(file_path):
    """Read a UTF-8 encoded JSON file and return the parsed object.

    Parameters
    ----------
    file_path : str
        Path to the JSON file to read.

    Returns
    -------
    The deserialized JSON content (typically a dict or list).
    """
    with open(file_path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
|
|
|
|
|
|
|
|
def split_data(index_list, train_ratio=0.8):
    """Randomly partition *index_list* into train/test index lists.

    Fixes a shared-mutation bug in the original implementation:
    ``random.shuffle(index_list)`` reordered the caller's list in
    place.  This version leaves the input untouched.

    Parameters
    ----------
    index_list : list
        Indices to split. Not modified.
    train_ratio : float, optional
        Fraction of items assigned to the training set (default 0.8).

    Returns
    -------
    tuple[list, list]
        ``(train_set, test_set)``, each sorted ascending; together they
        contain every element of *index_list* exactly once.
    """
    # random.sample returns a new shuffled list, so the caller's data
    # is never mutated (unlike random.shuffle, which works in place).
    shuffled = random.sample(index_list, len(index_list))
    split_point = int(len(shuffled) * train_ratio)
    train_set = sorted(shuffled[:split_point])
    test_set = sorted(shuffled[split_point:])
    return train_set, test_set
|
|
|
|
|
|
|
|
def save_json(data, file_path):
    """Serialize *data* to *file_path* as pretty-printed UTF-8 JSON.

    Parameters
    ----------
    data : object
        Any JSON-serializable object.
    file_path : str
        Destination path; the file is created or overwritten.
    """
    with open(file_path, 'w', encoding='utf-8') as out:
        json.dump(data, out, ensure_ascii=False, indent=4)
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Source JSON with the full index list and destination for the split.
    input_file = "/home/panwen.hu/workspace/jian.zhang/EAI/EAI2025/pixmo-points/Datasets/valid_one_points_indices.json"
    output_file = "./Datasets/valid_one_points_indices_split.json"

    # Extract the "index" array from the source file (empty list if absent).
    index_list = load_json(input_file).get("index", [])

    # NOTE(review): the odd ratio presumably targets a fixed test-set size
    # for this particular dataset — confirm against the dataset length.
    train_set, test_set = split_data(index_list, train_ratio=0.996374199)
    print(len(train_set), len(test_set))

    # Persist both partitions under explicit "train"/"test" keys.
    save_json({"train": train_set, "test": test_set}, output_file)
    print("数据集划分完成,已保存到", output_file)
|
|
|