# sftv3 / build_v3_swift_data.py
# Fengx1nn's picture
# Add files using upload-large-folder tool
# 37bfd13 verified
import json
import argparse
import os
import base64
import io
import numpy as np
import imageio.v3 as iio
from PIL import Image
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
def parse_args():
    """Define and parse the command-line interface of this dataset builder."""
    cli = argparse.ArgumentParser(description="Build MS-Swift SFT Dataset (BBox + Start/End Points)")
    # Input annotation files (all JSONL).
    cli.add_argument("--traj_path", type=str, required=True, help="Path to processed_trajectories.jsonl")
    cli.add_argument("--task_path", type=str, required=True, help="Path to task.jsonl")
    cli.add_argument("--origin_bbox_path", type=str, required=True, help="Path to original bbox.jsonl")
    # Output file and thread-pool size.
    cli.add_argument("--output_path", type=str, required=True, help="Output path for swift_sft.jsonl")
    cli.add_argument("--workers", type=int, default=8, help="Number of worker threads")
    return cli.parse_args()
def load_auxiliary_data(task_path, bbox_path):
    """Read task descriptions and object labels keyed by video path.

    Both inputs are JSONL files; malformed or incomplete lines are skipped
    silently (best-effort loading). Returns a defaultdict mapping
    video_path -> {'task': ..., 'label': ...} (either key may be absent).
    """
    print("Loading auxiliary metadata (Tasks & Labels)...")
    meta_map = defaultdict(dict)

    print(f"Reading tasks from {task_path}...")
    with open(task_path, 'r', encoding='utf-8') as task_file:
        for raw_line in task_file:
            if not raw_line.strip():
                continue
            try:
                record = json.loads(raw_line)
                video_key = record.get('video_path')
                task_text = record.get('object_bbox', {}).get('task', "")
                # Only store entries that have both a video key and a task.
                if video_key and task_text:
                    meta_map[video_key]['task'] = task_text
            except Exception:
                continue  # best-effort: skip unparsable lines

    print(f"Reading labels from {bbox_path}...")
    count_labels = 0
    with open(bbox_path, 'r', encoding='utf-8') as bbox_file:
        for raw_line in bbox_file:
            if not raw_line.strip():
                continue
            try:
                record = json.loads(raw_line)
                video_key = record.get('video_path')
                # Fall back to a generic label when none is provided.
                obj_label = record.get('object_bbox', {}).get('label', "target_object")
                if video_key:
                    meta_map[video_key]['label'] = obj_label
                    count_labels += 1
            except Exception:
                continue  # best-effort: skip unparsable lines

    print(f"Metadata loaded. Found labels for {count_labels} entries.")
    return meta_map
def numpy_to_base64(img_np):
    """Encode a numpy image array as a JPEG data-URI string.

    Non-uint8 inputs are cast to uint8 first (values are truncated/wrapped
    by the cast, not rescaled).
    """
    if img_np.dtype != np.uint8:
        img_np = img_np.astype(np.uint8)
    jpeg_buffer = io.BytesIO()
    # quality=90 keeps the payload compact while staying visually lossless.
    Image.fromarray(img_np).save(jpeg_buffer, format="JPEG", quality=90)
    encoded = base64.b64encode(jpeg_buffer.getvalue()).decode('utf-8')
    return f"data:image/jpeg;base64,{encoded}"
def process_single_video(video_path, samples, meta_info):
    """Convert one video's annotated frames into MS-Swift chat samples.

    Args:
        video_path: Path to the video file on disk.
        samples: List of dicts with keys 'frame_idx', 'bbox' (assumed
            [x1, y1, x2, y2] — TODO confirm against the annotation pipeline)
            and 'traj' (list of points; the last one is the end point).
        meta_info: Dict with optional 'task' (prompt text) and 'label' keys.

    Returns:
        List of Swift-format items ({"messages": [...], "images": [...]}).
        Empty when the video is missing, has no task description, or a
        fatal error occurs; individually undecodable frames are skipped.
    """
    task_desc = meta_info.get('task', "")
    obj_label = meta_info.get('label', "target_object")
    if not task_desc:
        return []
    if not os.path.exists(video_path):
        return []

    # The user prompt depends only on the task description, so build it
    # once per video instead of once per sample (it was loop-invariant).
    user_content = (
        f"<image>\n"
        f"Task: {task_desc}\n"
        f"Detect the target object and predict its trajectory start point and end point. "
        f"Output the result in JSON format with the following keys: \"box_2d\", \"label\", \"start_point\", and \"end_point\"."
    )

    results = []
    try:
        # sorted() instead of list.sort(): avoids mutating the caller's list.
        for sample in sorted(samples, key=lambda x: x['frame_idx']):
            frame_idx = sample['frame_idx']
            bbox = sample['bbox']
            full_traj = sample['traj']
            if not full_traj:  # empty/None trajectory -> nothing to predict
                continue
            # Start point = bbox center (integer division); end point = last
            # trajectory point.
            start_point = [(bbox[0] + bbox[2]) // 2, (bbox[1] + bbox[3]) // 2]
            end_point = full_traj[-1]
            try:
                frame = iio.imread(video_path, index=frame_idx, plugin="pyav")
                image_base64 = numpy_to_base64(frame)
            except Exception:
                # Best-effort: skip frames that cannot be decoded (the two
                # original handlers for IndexError/Exception did the same).
                continue
            response_data = {
                "box_2d": bbox,
                "label": obj_label,
                "start_point": start_point,
                "end_point": end_point
            }
            response_str = "```json\n" + json.dumps(response_data, ensure_ascii=False) + "\n```"
            results.append({
                "messages": [
                    {"role": "user", "content": user_content},
                    {"role": "assistant", "content": response_str}
                ],
                "images": [image_base64]
            })
    except Exception as e:
        print(f"Error opening video {video_path}: {e}")
        return []
    return results
def main():
    """Entry point: group trajectory samples per video, process them in a
    thread pool, and write the resulting SFT samples as JSONL."""
    args = parse_args()
    meta_map = load_auxiliary_data(args.task_path, args.origin_bbox_path)

    print(f"Loading trajectory data from {args.traj_path}...")
    video_groups = defaultdict(list)
    total_traj_samples = 0
    with open(args.traj_path, 'r', encoding='utf-8') as traj_file:
        for raw_line in traj_file:
            if not raw_line.strip():
                continue
            record = json.loads(raw_line)
            video_key = record['video_path']
            # Keep only samples whose video has a known task description.
            # .get(..., {}) checks membership without inserting into the
            # defaultdict, matching the original `in` + key test.
            if 'task' in meta_map.get(video_key, {}):
                video_groups[video_key].append(record)
                total_traj_samples += 1
    print(f"Grouped {total_traj_samples} samples into {len(video_groups)} videos.")

    print(f"Starting processing with {args.workers} workers...")
    final_dataset = []
    with ThreadPoolExecutor(max_workers=args.workers) as executor:
        future_to_video = {
            executor.submit(process_single_video, path, group, meta_map[path]): path
            for path, group in video_groups.items()
        }
        for done in tqdm(as_completed(future_to_video), total=len(video_groups), desc="Processing Videos"):
            video_samples = done.result()
            if video_samples:
                final_dataset.extend(video_samples)

    print(f"Saving {len(final_dataset)} samples to {args.output_path}...")
    os.makedirs(os.path.dirname(os.path.abspath(args.output_path)), exist_ok=True)
    with open(args.output_path, 'w', encoding='utf-8') as out_file:
        out_file.writelines(json.dumps(item, ensure_ascii=False) + '\n' for item in final_dataset)
    print("Done.")
# Standard script guard: run the pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
# Usage examples (shell commands), kept as a module-level string so the file
# stays importable. Each invocation is backgrounded with '&'. NOTE(review):
# the 'annotaions' spelling below appears to match the real directory names
# on the storage volume — do not "fix" it without checking the filesystem.
'''
python sft_data/v3/build_v3_swift_data.py \
--traj_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/traj_outputs/agilex_top_video_000k_034k_bbox_traj_results.jsonl \
--task_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/agilex_top_video_000k_034k_task.jsonl \
--origin_bbox_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/agilex_top_video_000k_034k_bbox.jsonl \
--output_path sft_data/v3/agilex_top_video_000k_034k_bbox_traj_sft_data_2pts.jsonl \
--workers 64 &
python sft_data/v3/build_v3_swift_data.py \
--traj_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/traj_outputs/franka_base_video_000k_100k_bbox_traj_results.jsonl \
--task_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/franka_base_video_000k_100k_task.jsonl \
--origin_bbox_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/franka_base_video_000k_100k_bbox.jsonl \
--output_path sft_data/v3/franka_base_video_000k_100k_bbox_traj_sft_data_2pts.jsonl \
--workers 64 &
python sft_data/v3/build_v3_swift_data.py \
--traj_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/traj_outputs/franka_base_video_100k_200k_bbox_traj_results.jsonl \
--task_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/franka_base_video_100k_200k_task.jsonl \
--origin_bbox_path /storage/v-xiangxizheng/dataset/InternData-M1/annotaions/franka_base_video_100k_200k_bbox.jsonl \
--output_path sft_data/v3/franka_base_video_100k_200k_bbox_traj_sft_data_2pts.jsonl \
--workers 64 &
hf upload sftv3 . --repo-type dataset --private &
'''