Jian Zhang committed on
Upload 8 files
Browse files
- .gitattributes +2 -0
- dataset_statistics_json.py +49 -0
- delete_hash_mismatch.py +30 -0
- download_pixmo.py +191 -0
- get_all_instructions.py +47 -0
- split_all_data_index.py +37 -0
- train_test_split.py +40 -0
- valid_one_points_indices.json +3 -0
- valid_one_points_indices_split.json +3 -0
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+valid_one_points_indices_split.json filter=lfs diff=lfs merge=lfs -text
+valid_one_points_indices.json filter=lfs diff=lfs merge=lfs -text
dataset_statistics_json.py ADDED
@@ -0,0 +1,49 @@
+import os
+import hashlib
+import datasets
+import json
+
+
+root_path = './Datasets/'
+data = datasets.load_dataset("allenai/pixmo-points", split="train", cache_dir=root_path)
+len_data = len(data)
+image_folder = os.path.join(root_path, "pixmo_images")
+
+
+hash_image_all = set()
+indices_one_point_all = []
+hash_image_one_point_all = set()
+
+record = {'index': [], 'image_hash': set()}
+
+for i, example in enumerate(data):
+    print(i, len_data, f"{(i / len_data) * 100:.3f}%")
+    points = example['points']
+    image_hash = example["image_sha256"]
+    if len(points) == 1:
+        indices_one_point_all.append(i)
+        hash_image_one_point_all.add(image_hash)
+
+
+
+    hash_image_all.add(image_hash)
+    image_url = example["image_url"]
+    image_path = os.path.join(image_folder, f"{image_hash}.jpg")
+
+    if not os.path.exists(image_path):
+        print('image does not exist, skip')
+        continue  # image is missing, skip it
+    if len(points) == 1:  # the image is known to exist past the continue above
+        record['index'].append(i)
+        record['image_hash'].add(image_hash)
+
+record['number of indices'] = len(record['index'])
+record['number of images'] = len(record['image_hash'])
+
+print('all images', len(hash_image_all))
+print('all one point', len(indices_one_point_all), len(hash_image_one_point_all))
+print('downloaded one point', 'number of indices', len(record['index']), 'images:', len(record['image_hash']))
+
+record['image_hash'] = list(record['image_hash'])
+with open("valid_one_points_indices.json", "w") as f:
+    json.dump(record, f, indent=4)
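Note: downstream scripts (get_all_instructions.py, split_all_data_index.py, train_test_split.py) read this file back through its "index" key. A minimal sketch of what a consumer sees, assuming the script above has run to completion:

import json

with open("valid_one_points_indices.json") as f:
    record = json.load(f)

# record["index"]      -> dataset row indices with exactly one point and a downloaded image
# record["image_hash"] -> unique SHA-256 hashes of those images (serialized as a list)
print(record["number of indices"], record["number of images"])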
delete_hash_mismatch.py ADDED
@@ -0,0 +1,30 @@
+import os
+import hashlib
+import datasets
+
+
+root_path = './Datasets/'
+data = datasets.load_dataset("allenai/pixmo-points", split="train", cache_dir=root_path)
+len_data = len(data)
+image_folder = os.path.join(root_path, "pixmo_images")
+
+
+for i, example in enumerate(data):
+    print(i, len_data, f"{(i / len_data) * 100:.3f}%")
+    # break
+    image_hash = example["image_sha256"]
+    image_path = os.path.join(image_folder, f"{image_hash}.jpg")
+
+    if not os.path.exists(image_path):
+        print('image does not exist, skip')
+        continue  # image is missing, skip it
+
+    with open(image_path, "rb") as img_file:
+        image_bytes = img_file.read()
+    file_hash = hashlib.sha256(image_bytes).hexdigest()
+    if file_hash != image_hash:
+        os.remove(image_path)  # hash mismatch: delete the image
+        print('hash mismatch, delete')
+    else:
+        print('hash matches')
+    # break
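Note: this script reads each image fully into memory before hashing. For large files, hashing in chunks bounds memory use; a minimal drop-in sketch using only the standard library:

import hashlib

def sha256_of_file(path, chunk_size=1 << 20):
    # Stream the file through SHA-256 one 1 MiB chunk at a time
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()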
download_pixmo.py ADDED
@@ -0,0 +1,191 @@
+import datasets
+from hashlib import sha256
+import requests
+
+from pathlib import Path
+import json
+import multiprocessing
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import threading
+
+# Create a global lock
+lock = threading.Lock()
+
+root_path = './Datasets/'
+cache_dir = Path(root_path, "pixmo_images")
+cache_dir.mkdir(parents=True, exist_ok=True)
+
+# Load the dataset
+data = datasets.load_dataset("allenai/pixmo-points", split="train", cache_dir=root_path)
+
+print(f"Dataset size: {len(data)}")
+len_data = len(data)
+
+# Thread pool parameters
+# MAX_WORKERS = 12  # tunable; 5-20 is a reasonable range
+cpu_cores = multiprocessing.cpu_count()
+MAX_WORKERS = min(64, cpu_cores * 4)
+print(f"CPU cores: {cpu_cores}, thread pool size: {MAX_WORKERS}")
+
+# Track successful and failed downloads
+success_indices = []
+failed_indices = []
+
+def download_image(i, example, timeout=15):
+    """Download one image and verify its hash."""
+    # print(i, len_data, f"{(i / len_data) * 100:.3f} %")
+    points = example['points']
+    if len(points) != 1:
+        return None, None  # only process examples where len(points) == 1
+
+    image_url = example["image_url"]
+    image_filename = f"{example['image_sha256']}.jpg"
+    image_path = cache_dir / image_filename
+
+    if image_path.exists():
+        print(image_path, 'exists, skip')
+        return i, None  # already downloaded, skip
+
+    try:
+        # Download the image
+        # Hold the lock while writing so two threads never write the same file at once
+        image_bytes = requests.get(image_url, timeout=timeout).content
+        with lock:
+            with open(image_path, "wb") as f:
+                f.write(image_bytes)
+
+        # Verify SHA256
+        byte_hash = sha256(image_bytes).hexdigest()
+        if byte_hash != example["image_sha256"]:
+            # with lock:  # lock the delete of the bad file to avoid concurrent-delete issues
+            image_path.unlink()
+            print(image_path, 'hash mismatch, delete')
+            return None, i  # record failure
+
+        print(image_path, 'save success!')
+        return i, None  # record success
+
+    except requests.exceptions.Timeout:
+        print(f"Timeout while downloading {image_url}. Skipping...")
+        return None, i
+    except requests.exceptions.RequestException as e:
+        print(f"Error downloading {image_url}: {e}")
+        return None, i  # record failure
+
+
+completed_count = 0
+
+# Multithreaded download
+with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+    # future_to_index = {executor.submit(download_image, i, data[i]): i for i in range(1038456, len_data)}
+
+    # for future in as_completed(future_to_index):
+    #     success, failed = future.result()
+    #     if success is not None:
+    #         success_indices.append(success)
+    #     if failed is not None:
+    #         failed_indices.append(failed)
+
+    start_index = 1541083
+    future_to_index = {}
+
+    # Assign indices with a stride so adjacent i are not downloaded at the same time
+    for offset in range(0, MAX_WORKERS):  # each worker thread starts from a different offset
+        # for i in range(1541083 + offset, len_data, MAX_WORKERS):  # step size = MAX_WORKERS
+        for i in range(0 + offset, 1541083, MAX_WORKERS):  # step size = MAX_WORKERS
+            future = executor.submit(download_image, i, data[i])
+            future_to_index[future] = i
+
+    # Process the results
+    for future in as_completed(future_to_index):
+        success, failed = future.result()
+        if success is not None:
+            success_indices.append(success)
+        if failed is not None:
+            failed_indices.append(failed)
+
+        # Update progress in the main thread
+        completed_count += 1
+        # print(f"Progress: {completed_count}/{len_data} ({(completed_count / len_data) * 100:.3f}%)")
+        print(f"Progress: {completed_count}/{1541083} ({(completed_count / 1541083) * 100:.3f}%)")
+
+# Save the successful and failed indices
+with open("one_points_indices.json", "w") as f:
+    json.dump(success_indices, f, indent=4)
+with open("failed_indices.json", "w") as f:
+    json.dump(failed_indices, f, indent=4)
+
+print(f"Total successful downloads: {len(success_indices)}")
+print(f"Total failed downloads: {len(failed_indices)}")
+
+
+# import datasets
+# from hashlib import sha256
+# import requests
+# import os
+# from pathlib import Path
+# from hashlib import sha256
+# import requests
+# from datasets import load_dataset
+# import json
+
+# # os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
+# # export HF_ENDPOINT=https://hf-mirror.com
+
+# root_path = './Datasets/'
+
+
+# cache_dir = Path(root_path, "pixmo_images")
+
+# cache_dir.mkdir(parents=True, exist_ok=True)
+
+# # Load the dataset
+# data = load_dataset("allenai/pixmo-points", split="train", cache_dir=root_path)
+
+# print(len(data))
+# len_data = len(data)
+# def download_image(example, timeout=10):
+#     # print(example)
+#     points = example['points']
+#     len_points = len(points)
+#     if len_points > 1:
+#         return None
+#     if len_points == 0:
+#         return None
+#     image_url = example["image_url"]
+#     print(example['image_sha256'], len_points, points)
+#     image_filename = f"{example['image_sha256']}.jpg"
+#     image_path = cache_dir / image_filename
+
+#     # Download the image if it is not cached yet
+#     if not image_path.exists():
+#         try:
+#             # Set a request timeout
+#             image_bytes = requests.get(image_url, timeout=timeout).content
+#             with open(image_path, "wb") as f:
+#                 f.write(image_bytes)
+
+#             # Verify SHA256
+#             byte_hash = sha256(image_bytes).hexdigest()
+#             assert byte_hash == example["image_sha256"], "SHA256 mismatch!"
+#         except requests.exceptions.Timeout:
+#             print(f"Timeout while downloading {image_url}. Skipping...")
+#         except Exception as e:
+#             print(f"Error downloading {image_url}: {e}")
+#     return True
+
+# # Pre-download all images (only needed on the first run)
+# summ = 0
+# success_indices = []  # stores the indices that downloaded successfully
+
+# for i, example in enumerate(data):
+#     print(i, len_data, f"{(i / len_data) * 100:.2f} %")
+#     s = download_image(example)
+#     if s:
+#         summ += 1
+#         success_indices.append(i)  # record the successful i
+
+# # Save the successful i values to a JSON file
+# with open("one_points_indices.json", "w") as f:
+#     json.dump(success_indices, f, indent=4)
+# print("Total successful downloads:", summ)
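Note on the strided submission above: with a stride of MAX_WORKERS, offset 0 yields indices 0, MAX_WORKERS, 2*MAX_WORKERS, ..., offset 1 yields 1, MAX_WORKERS+1, ..., so neighboring dataset rows are not enqueued back to back. A minimal sketch of the same pattern in isolation, with a hypothetical work() standing in for download_image:

from concurrent.futures import ThreadPoolExecutor, as_completed

def work(i):
    # placeholder for a per-index task such as download_image
    return i * i

n_items, n_workers = 10, 4
with ThreadPoolExecutor(max_workers=n_workers) as ex:
    # same strided order as above: 0,4,8, then 1,5,9, then 2,6, then 3,7
    futures = [ex.submit(work, i)
               for offset in range(n_workers)
               for i in range(offset, n_items, n_workers)]
    results = sorted(f.result() for f in as_completed(futures))
print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]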
get_all_instructions.py ADDED
@@ -0,0 +1,47 @@
+import os
+import json
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from tqdm import tqdm  # import the tqdm library
+import datasets
+
+# Assumes the PixmoDataset class is already defined
+root_path = './Datasets/'
+data = datasets.load_dataset("allenai/pixmo-points", split="train", cache_dir=root_path)
+len_data = len(data)
+image_folder = os.path.join(root_path, "pixmo_images")
+
+valid_one_points_indices = '/home/panwen.hu/workspace/jian.zhang/EAI/EAI2025/pixmo-points/Datasets/valid_one_points_indices.json'
+
+def load_json(file_path):
+    with open(file_path, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+    return data
+
+
+data_json = load_json(valid_one_points_indices)
+index_list = data_json.get("index", [])
+
+ins_all = set()
+
+def process_item(i):
+    print(i, len_data, f"{(i / len_data) * 100:.3f}%")
+    item = data[i]
+    instruction = item['label']
+    return instruction
+
+# Process items in parallel with a thread pool
+with ThreadPoolExecutor(max_workers=64) as executor:  # tune max_workers to your CPU core count
+    # Submit the tasks to the pool
+    futures = [
+        executor.submit(process_item, i) for i in index_list
+    ]
+
+    # Show progress with tqdm
+    for future in tqdm(as_completed(futures), total=len(futures), desc="Processing"):
+        instruction = future.result()
+        ins_all.add(instruction)
+
+# Save the result to a JSON file
+json_path = os.path.join('/home/panwen.hu/workspace/jian.zhang/EAI/EAI2025/Afford-RDT/data/encode_language/', "pixmo_all_instructions_one_point.json")
+with open(json_path, "w", encoding="utf-8") as f:
+    json.dump(list(ins_all), f, indent=4)
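Note: process_item only indexes an in-memory dataset, so the thread pool here buys little beyond progress reporting. A sequential equivalent, assuming the same data and index_list, would be:

from tqdm import tqdm

# deduplicate the 'label' field over the valid one-point indices
ins_all = {data[i]['label'] for i in tqdm(index_list, desc="Processing")}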
split_all_data_index.py ADDED
@@ -0,0 +1,37 @@
+import os
+import json
+from datasets import load_dataset
+
+def load_json(file_path):
+    with open(file_path, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+    return data
+
+class DatasetSaver:
+    def __init__(self, data_root):
+        self.data_root = data_root
+        # os.makedirs(self.data_root, exist_ok=True)
+        self.split_file = os.path.join(self.data_root, "valid_one_points_indices.json")
+        self.one_point_indices = load_json(self.split_file)["index"]
+
+
+    def save_data(self):
+        dataset = load_dataset("allenai/pixmo-points", split="train", cache_dir=self.data_root)
+
+        for idx in self.one_point_indices:
+
+            print(f" {idx + 1} files...", idx / len(dataset) * 100, "%")
+
+            data = dataset[idx]
+            file_path = os.path.join(self.data_root, f"pixmo_data/{idx}.json")
+            with open(file_path, "w", encoding="utf-8") as f:
+                json.dump(data, f, ensure_ascii=False, indent=4)
+            print('saved')
+
+
+
+        print("All data saved successfully.")
+
+# Usage example
+data_saver = DatasetSaver('/home/rongtao/jianzhang/pixmo-points/Datasets')
+data_saver.save_data()
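Note: save_data writes into data_root/pixmo_data/ but never creates that directory (the makedirs call in __init__ is commented out), so it must exist before the first run. A small guard, assuming the same layout, could be added to __init__:

import os

# ensure the per-index output directory exists before save_data runs
os.makedirs(os.path.join(data_root, "pixmo_data"), exist_ok=True)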
train_test_split.py ADDED
@@ -0,0 +1,40 @@
+import json
+import random
+
+# Read a JSON file
+def load_json(file_path):
+    with open(file_path, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+    return data
+
+# Split the dataset
+def split_data(index_list, train_ratio=0.8):
+    random.shuffle(index_list)  # shuffle the indices
+    split_point = int(len(index_list) * train_ratio)
+    train_set = sorted(index_list[:split_point])  # sort the train set
+    test_set = sorted(index_list[split_point:])  # sort the test set
+    return train_set, test_set
+
+# Save a JSON file
+def save_json(data, file_path):
+    with open(file_path, 'w', encoding='utf-8') as f:
+        json.dump(data, f, ensure_ascii=False, indent=4)
+
+# Run
+if __name__ == "__main__":
+    input_file = "/home/panwen.hu/workspace/jian.zhang/EAI/EAI2025/pixmo-points/Datasets/valid_one_points_indices.json"  # source JSON file
+    output_file = "./Datasets/valid_one_points_indices_split.json"  # destination JSON file for the split
+
+    data = load_json(input_file)
+    index_list = data.get("index", [])
+
+    train_set, test_set = split_data(index_list, train_ratio=0.996374199)
+    print(len(train_set), len(test_set))
+
+    new_data = {
+        "train": train_set,
+        "test": test_set
+    }
+
+    save_json(new_data, output_file)
+    print("Dataset split complete; saved to", output_file)
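Note: split_data shuffles in place without seeding, so each run yields a different train/test split over the same index list. Seeding the RNG first makes the split reproducible; a minimal sketch, with 42 as an arbitrary example seed:

import random

random.seed(42)  # any fixed seed makes the shuffle, and thus the split, deterministic
train_set, test_set = split_data(index_list, train_ratio=0.996374199)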
valid_one_points_indices.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f3390de255b40a2b0a257f82cbbb507b468da56185d4a55bf3556311d71911
+size 26473234
valid_one_points_indices_split.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fd130b27de4d8d490392a596a36165a326b38895a609aecf2ac33698d1f0cec
+size 16732470