import json
from collections import defaultdict

import numpy as np

TRAIN_JSON_PATH = "./data/miko/train.json"
ACTION_STATS_PATH = "./data/action_statistics.json"

# Constants to stabilize training
MIN_TOTAL_LEN = 1.0  # Minimum allowed total duration
MAX_WEIGHT = 0.05    # Cap to prevent a single label from dominating


def _collect_durations(dataset):
    """Sum labelled duration per act_cat across all dataset entries.

    Each entry is expected to carry ``frame_ann.labels`` where every label
    has ``start_t``/``end_t`` timestamps and an ``act_cat`` list — assumed
    schema of train.json; TODO confirm against the data producer. Labels
    missing either timestamp are skipped; durations are floored at 0.01 so
    zero/negative-length labels still contribute a tiny amount.

    Returns a ``defaultdict(float)`` mapping act_cat -> total duration.
    """
    act_cat_stats = defaultdict(float)
    for entry in dataset:
        for label in entry.get("frame_ann", {}).get("labels", []):
            start = label.get("start_t")
            end = label.get("end_t")
            if start is None or end is None:
                continue  # incomplete label — nothing to accumulate
            duration = max(0.01, end - start)
            for act in label.get("act_cat", []):
                act_cat_stats[act] += duration
    return act_cat_stats


def _compute_weights(act_cat_stats):
    """Turn per-category total durations into normalized inverse weights.

    The raw weight is 1/total_len (rarer labels weigh more), with total_len
    floored at MIN_TOTAL_LEN and the weight capped at MAX_WEIGHT so no single
    label dominates; the floor at 0 is a belt-and-braces guard. Non-finite
    totals are reported and skipped. Weights are normalized to sum to 1.

    Returns a dict ``act_cat -> {"total_len", "total_weight", "weight"}``.
    Raises ValueError if every weight is zero.
    """
    cleaned_stats = {}
    for act, total_len in act_cat_stats.items():
        total_len = max(MIN_TOTAL_LEN, total_len)
        if not np.isfinite(total_len):
            print(f"⚠️ Skipping act_cat '{act}' due to invalid total_len={total_len}")
            continue
        # Cap first, then clip negatives to 0 (same result as the original
        # two-step min/max sequence).
        weight = max(0.0, min(1.0 / total_len, MAX_WEIGHT))
        cleaned_stats[act] = {
            "total_len": total_len,
            "total_weight": 1,
            "weight": weight,
        }

    # Normalize weights so they sum to 1 across all kept categories.
    total_weight_sum = sum(stats["weight"] for stats in cleaned_stats.values())
    if total_weight_sum == 0:
        raise ValueError("❌ All weights are zero. Check act_cat labels or durations.")
    for stats in cleaned_stats.values():
        stats["weight"] /= total_weight_sum
    return cleaned_stats


def main(train_json_path=TRAIN_JSON_PATH, action_stats_path=ACTION_STATS_PATH):
    """Read train annotations, compute act_cat weights, write statistics JSON.

    Paths default to the module-level constants so ``python thisfile.py``
    behaves exactly as before; they are parameters only for testability.
    """
    # Encoding pinned so behaviour does not depend on the platform default.
    with open(train_json_path, "r", encoding="utf-8") as f:
        dataset = json.load(f)

    cleaned_stats = _compute_weights(_collect_durations(dataset))

    # Save updated statistics
    with open(action_stats_path, "w", encoding="utf-8") as f:
        json.dump(cleaned_stats, f, indent=2)

    print(f"✅ Saved {len(cleaned_stats)} act_cat entries to {action_stats_path}")
    print(f"🎯 Final normalized weight sum: {sum(stats['weight'] for stats in cleaned_stats.values()):.4f}")


if __name__ == "__main__":
    main()