| |
| |
|
|
| import json |
| import math |
| import argparse |
| import random |
| from pathlib import Path |
| from typing import Any, Dict, List, Tuple, DefaultDict, Optional |
| from collections import defaultdict |
|
|
| |
|
|
def load_answer_maps(path: str) -> Tuple[Dict[str, int], Dict[int, str]]:
    """
    Read the two-entry answer mapping file:
        [ {text -> id}, { "0": "text", "1": "text", ... } ]

    Returns:
        (text2id, id2text): text2id keys are whitespace/case-normalized,
        id2text keys are plain ints mapping to the canonical answer text.

    Raises:
        ValueError: if the file is not a list of (at least) two dicts.
    """
    with open(path, "r", encoding="utf-8") as fh:
        payload = json.load(fh)

    if not (isinstance(payload, list) and len(payload) >= 2):
        raise ValueError("answer_dict file must be a list of two dicts: [text2id, id2text].")

    raw_text2id, raw_id2text = payload[0], payload[1]
    if not (isinstance(raw_text2id, dict) and isinstance(raw_id2text, dict)):
        raise ValueError("Both elements in answer_dict must be dicts.")

    def _canon(text: str) -> str:
        # Lowercase and collapse internal whitespace for stable lookups.
        return " ".join(text.strip().lower().split())

    text2id = {_canon(key): int(val) for key, val in raw_text2id.items()}
    id2text = {int(key): str(val) for key, val in raw_id2text.items()}
    return text2id, id2text
|
|
def normalize_answer_text(s: str) -> str:
    """Lowercase *s* and collapse every run of whitespace to a single space."""
    tokens = s.strip().lower().split()
    return " ".join(tokens)
|
|
| |
|
|
def pick_situation(d: Dict[str, Any]) -> str:
    """Return the first truthy situation field (text preferred over multimodal), else ''."""
    for key in ("situation_text", "situation_multimodal"):
        value = d.get(key)
        if value:
            return value
    return ""
|
|
def get_location(d: Dict[str, Any]) -> List[float]:
    """Return [x, y, z] floats from meta_data.start_point, else d['location'], else the origin."""
    meta = d.get("meta_data") or {}
    start = meta.get("start_point")
    if isinstance(start, (list, tuple)) and len(start) >= 3:
        src = start
    else:
        src = d.get("location", [0, 0, 0])
    # Only the first three components are used, even if more are present.
    return [float(src[i]) for i in range(3)]
|
|
def yaw_to_vec3(yaw: float) -> List[float]:
    """Unit heading vector in the XY plane for a yaw angle given in radians."""
    cos_y = math.cos(yaw)
    sin_y = math.sin(yaw)
    return [cos_y, sin_y, 0.0]
|
|
def quat_to_yaw(q: List[float]) -> float:
    """
    Extract the yaw angle (rotation about +Z, in radians) from a quaternion.

    The quaternion is interpreted in [x, y, z, w] component order, which is
    the order `get_orientation_vec` feeds in from `d["orientation"]`.

    Args:
        q: quaternion components; anything other than exactly 4 entries
           yields 0.0.

    Returns:
        Yaw in radians via atan2(2(wz + xy), 1 - 2(y^2 + z^2)).

    Note: the original code had a second, unreachable branch that re-read the
    quaternion as [w, x, y, z] "on exception" — but math.atan2 on floats never
    raises, so that fallback was dead code and has been removed. The live
    behavior is unchanged.
    """
    if len(q) != 4:
        return 0.0
    x, y, z, w = map(float, q)
    # Standard quaternion -> Euler yaw formula.
    siny_cosp = 2.0 * (w * z + x * y)
    cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
    return math.atan2(siny_cosp, cosy_cosp)
|
|
def get_orientation_vec(d: Dict[str, Any]) -> List[float]:
    """
    Resolve a 3-vector heading for a sample, trying in order:
    meta_data.start_ori (yaw in radians), a 4-element quaternion in
    d['orientation'], a raw 3-vector in d['orientation'], then +X default.
    """
    meta = d.get("meta_data") or {}
    yaw = meta.get("start_ori")
    if isinstance(yaw, (int, float)):
        return yaw_to_vec3(float(yaw))
    ori = d.get("orientation")
    if isinstance(ori, (list, tuple)):
        if len(ori) == 4:
            # Quaternion: collapse to yaw, then to a planar unit vector.
            return yaw_to_vec3(quat_to_yaw([float(c) for c in ori]))
        if len(ori) >= 3:
            return [float(ori[0]), float(ori[1]), float(ori[2])]
    return [1.0, 0.0, 0.0]
|
|
| |
|
|
def eight_direction_answer(item: Dict[str, Any], text2id: Dict[str, int], id2text: Dict[int, str]) -> Optional[str]:
    """
    Returns a SINGLE canonical text answer for eight_direction (e.g., 'turn left'),
    resolving from either [code, 'text'] or just code or text, using id<->text maps.

    Returns None when no eight_direction value is present or it has an
    unrecognized shape.
    """
    # Tolerate meta_data being explicitly None (consistent with get_location /
    # get_orientation_vec, which use `d.get("meta_data") or {}`) and tolerate
    # action / action_type being non-dicts; previously both cases raised
    # AttributeError on `.get`.
    meta = item.get("meta_data") or {}
    act = item.get("action") or meta.get("action_type") or {}
    if not isinstance(act, dict):
        return None
    v = act.get("eight_direction")

    # Shape 1: [code, "text", ...] (text wins) or [code].
    if isinstance(v, (list, tuple)) and len(v) > 0:
        if len(v) >= 2 and isinstance(v[1], str) and v[1].strip():
            # Round-trip text2id -> id2text to get canonical wording;
            # -999 is a sentinel that can never be a real id, so an unmapped
            # text falls back to its own stripped form.
            return id2text.get(text2id.get(normalize_answer_text(v[1]), -999), v[1].strip())
        if isinstance(v[0], int):
            return id2text.get(v[0], str(v[0]))

    # Shape 2: bare integer code.
    if isinstance(v, int):
        return id2text.get(v, str(v))

    # Shape 3: bare text answer.
    if isinstance(v, str) and v.strip():
        norm = normalize_answer_text(v)
        if norm in text2id:
            return id2text.get(text2id[norm], v.strip())
        return v.strip()

    return None
|
|
| |
|
|
def convert_to_flat(nested: Dict[str, Any], text2id: Dict[str, int], id2text: Dict[int, str]) -> List[Dict[str, Any]]:
    """
    Flatten the nested {scan_id: {index: item}} JSON into flat records,
    keeping ONLY samples whose eight_direction answer resolves, with the
    answer normalized to its canonical text form.
    """
    records: List[Dict[str, Any]] = []
    for scan_id, steps in nested.items():
        if not isinstance(steps, dict):
            continue
        for idx_key, item in steps.items():
            if not isinstance(item, dict):
                continue

            answer_text = eight_direction_answer(item, text2id, id2text)
            if not answer_text:
                # No resolvable eight_direction answer -> drop the sample.
                continue

            try:
                index = int(idx_key)
            except Exception:
                # Non-numeric key: fall back to the running record count.
                index = len(records)

            sid = item.get("scan_id") or scan_id
            records.append(
                {
                    "question": "What action should I take next step?",
                    "situation_text": pick_situation(item),
                    "answers": [answer_text],
                    "scan_id": sid,
                    "location": get_location(item),
                    "interaction": item.get("interaction"),
                    "orientation": get_orientation_vec(item),
                    "type": "navigation",
                    "index": index,
                    "question_id": f"{sid}_{index}",
                }
            )
    return records
|
|
| |
|
|
def records_to_annotations(records: List[Dict[str, Any]], text2id: Dict[str, int]) -> Dict[str, Any]:
    """
    Map flat records into the 'annotations' format.

    - answer_id comes from the provided text2id mapping (normalized lookup).
    - Unmapped answer text gets answer_id = -1.
    - orientation (dx, dy) is packed into 'rotation' and location into 'position'.
    """
    annotations: List[Dict[str, Any]] = []
    for rec in records:
        dx, dy, _ = rec["orientation"]
        px, py, pz = rec["location"]

        answer_objs = []
        for raw in rec.get("answers") or []:
            text = str(raw)
            answer_objs.append(
                {
                    "answer": text,
                    "answer_confidence": "yes",
                    "answer_id": text2id.get(normalize_answer_text(text), -1),
                }
            )

        annotations.append(
            {
                "scan_id": rec["scan_id"],
                "question_type": rec.get("type", "navigation"),
                "answer_type": "other",
                "question_id": rec["question_id"],
                "answers": answer_objs,
                "rotation": {"_x": dx, "_y": dy, "_z": 0.0, "_w": 0.0},
                "position": {"x": px, "y": py, "z": pz},
            }
        )
    return {"annotations": annotations}
|
|
| |
|
|
def group_by_scene(records: List[Dict[str, Any]]) -> DefaultDict[str, List[Dict[str, Any]]]:
    """Bucket records by their 'scan_id' field, preserving input order within a scene."""
    grouped: DefaultDict[str, List[Dict[str, Any]]] = defaultdict(list)
    for record in records:
        grouped[record["scan_id"]].append(record)
    return grouped
|
|
def split_scenes(scene_ids: List[str], val_ratio: float, test_ratio: float, seed: int) -> Tuple[List[str], List[str], List[str]]:
    """
    Deterministically shuffle scene ids and carve off val/test slices;
    whatever remains is train. Ratios are rounded to whole scene counts
    and clamped so the three splits never overlap.
    """
    shuffled = list(scene_ids)
    random.Random(seed).shuffle(shuffled)

    total = len(shuffled)
    n_val = min(int(round(total * val_ratio)), total)
    n_test = min(int(round(total * test_ratio)), max(0, total - n_val))

    val = shuffled[:n_val]
    test = shuffled[n_val:n_val + n_test]
    train = shuffled[n_val + n_test:]
    return train, val, test
|
|
def flatten_from_ids(buckets: Dict[str, List[Dict[str, Any]]], ids: List[str]) -> List[Dict[str, Any]]:
    """Concatenate bucket contents for *ids* in order; ids absent from buckets contribute nothing."""
    return [record for scene_id in ids for record in buckets.get(scene_id, [])]
|
|
| |
|
|
def main():
    """
    CLI entry point: load the answer maps, convert the nested input JSON to
    flat records, split them 8:1:1 by scene, and write one flat JSON and one
    annotations JSON per split next to the input (or into --output_dir).
    """
    parser = argparse.ArgumentParser(description="Convert & split dataset (8:1:1 by scene) and write annotation files with answer_id from answer_dict.")
    parser.add_argument("--input", type=str, default="msnn/msnn_scannet.json", help="Path to input JSON file")
    parser.add_argument("--output_dir", type=str, default=None, help="Directory to write outputs (default: input's directory)")
    parser.add_argument("--answer_dict", type=str, default="msnn/answer_dict.json", help="Path to the answer mapping JSON (list of two dicts)")
    parser.add_argument("--seed", type=int, default=42, help="Random seed for scene split")
    parser.add_argument("--val_ratio", type=float, default=0.1, help="Validation ratio (by scene)")
    parser.add_argument("--test_ratio", type=float, default=0.1, help="Test ratio (by scene)")
    args = parser.parse_args()

    text2id, id2text = load_answer_maps(args.answer_dict)

    with open(args.input, "r", encoding="utf-8") as fh:
        nested = json.load(fh)

    flat = convert_to_flat(nested, text2id, id2text)
    buckets = group_by_scene(flat)
    scene_ids = sorted(buckets)
    train_ids, val_ids, test_ids = split_scenes(scene_ids, args.val_ratio, args.test_ratio, args.seed)

    # Insertion order (train, val, test) drives file-write and print order below.
    split_records = {
        "train": flatten_from_ids(buckets, train_ids),
        "val": flatten_from_ids(buckets, val_ids),
        "test": flatten_from_ids(buckets, test_ids),
    }

    out_dir = Path(args.output_dir) if args.output_dir else Path(args.input).parent
    out_dir.mkdir(parents=True, exist_ok=True)

    flat_paths = {name: out_dir / f"msnn_{name}_eight_direction.json" for name in split_records}
    ann_paths = {name: out_dir / f"msnn_{name}_eight_direction_annotations.json" for name in split_records}

    # Flat record files first, then the annotation files.
    for name, recs in split_records.items():
        with open(flat_paths[name], "w", encoding="utf-8") as fh:
            json.dump(recs, fh, ensure_ascii=False, indent=4)
    for name, recs in split_records.items():
        with open(ann_paths[name], "w", encoding="utf-8") as fh:
            json.dump(records_to_annotations(recs, text2id), fh, ensure_ascii=False, indent=4)

    print(f"Scenes: total={len(scene_ids)} | train={len(train_ids)} | val={len(val_ids)} | test={len(test_ids)}")
    print(f"Samples: train={len(split_records['train'])} | val={len(split_records['val'])} | test={len(split_records['test'])}")
    print("Wrote:")
    for path in list(flat_paths.values()) + list(ann_paths.values()):
        print(f" {path}")


if __name__ == "__main__":
    main()