{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7810431122779846,
"min": 0.7810431122779846,
"max": 2.8477697372436523,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7423.03369140625,
"min": 7423.03369140625,
"max": 29070.033203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.08940315246582,
"min": 0.40348178148269653,
"max": 13.08940315246582,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2552.43359375,
"min": 78.27546691894531,
"max": 2655.8310546875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06945670676884452,
"min": 0.062477663467072085,
"max": 0.07947212861503894,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2778268270753781,
"min": 0.25290907612846547,
"max": 0.3973606430751947,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1992388349756891,
"min": 0.1331193649333299,
"max": 0.2873291909402492,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7969553399027564,
"min": 0.5324774597333196,
"max": 1.4188257715865678,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.568181818181817,
"min": 3.8181818181818183,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1125.0,
"min": 168.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.568181818181817,
"min": 3.8181818181818183,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1125.0,
"min": 168.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1767344821",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1767345389"
},
"total": 567.6434972799998,
"count": 1,
"self": 0.5352509379999901,
"children": {
"run_training.setup": {
"total": 0.0337996859998384,
"count": 1,
"self": 0.0337996859998384
},
"TrainerController.start_learning": {
"total": 567.074446656,
"count": 1,
"self": 0.5876006640050946,
"children": {
"TrainerController._reset_env": {
"total": 3.336893699999564,
"count": 1,
"self": 3.336893699999564
},
"TrainerController.advance": {
"total": 563.078471320996,
"count": 18192,
"self": 0.620691602986426,
"children": {
"env_step": {
"total": 406.26175522302447,
"count": 18192,
"self": 352.73940189305176,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.148775361986736,
"count": 18192,
"self": 1.8144699319941537,
"children": {
"TorchPolicy.evaluate": {
"total": 51.33430542999258,
"count": 18192,
"self": 51.33430542999258
}
}
},
"workers": {
"total": 0.3735779679859661,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 564.2765761320147,
"count": 18192,
"is_parallel": true,
"self": 258.49283732598406,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059701860000132,
"count": 1,
"is_parallel": true,
"self": 0.004307611999138317,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016625740008748835,
"count": 10,
"is_parallel": true,
"self": 0.0016625740008748835
}
}
},
"UnityEnvironment.step": {
"total": 0.07849196000006486,
"count": 1,
"is_parallel": true,
"self": 0.0007445759997608548,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0028831469999204273,
"count": 1,
"is_parallel": true,
"self": 0.0028831469999204273
},
"communicator.exchange": {
"total": 0.07262334300003204,
"count": 1,
"is_parallel": true,
"self": 0.07262334300003204
},
"steps_from_proto": {
"total": 0.0022408940003515454,
"count": 1,
"is_parallel": true,
"self": 0.000430509000125312,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018103850002262334,
"count": 10,
"is_parallel": true,
"self": 0.0018103850002262334
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 305.78373880603067,
"count": 18191,
"is_parallel": true,
"self": 14.47968257501634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.750175043987383,
"count": 18191,
"is_parallel": true,
"self": 7.750175043987383
},
"communicator.exchange": {
"total": 235.18599861001348,
"count": 18191,
"is_parallel": true,
"self": 235.18599861001348
},
"steps_from_proto": {
"total": 48.36788257701346,
"count": 18191,
"is_parallel": true,
"self": 8.819182463074412,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39.54870011393905,
"count": 181910,
"is_parallel": true,
"self": 39.54870011393905
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 156.19602449498507,
"count": 18192,
"self": 0.7877124740098225,
"children": {
"process_trajectory": {
"total": 29.190257234977253,
"count": 18192,
"self": 28.743928147977385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4463290869998673,
"count": 4,
"self": 0.4463290869998673
}
}
},
"_update_policy": {
"total": 126.21805478599799,
"count": 90,
"self": 48.991257941999265,
"children": {
"TorchPPOOptimizer.update": {
"total": 77.22679684399873,
"count": 4587,
"self": 77.22679684399873
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1559995982679538e-06,
"count": 1,
"self": 1.1559995982679538e-06
},
"TrainerController._save_models": {
"total": 0.07147981499974776,
"count": 1,
"self": 0.0009732739995342854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07050654100021347,
"count": 1,
"self": 0.07050654100021347
}
}
}
}
}
}
}