{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7967394590377808,
"min": 0.7967394590377808,
"max": 2.856968641281128,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7572.2119140625,
"min": 7572.2119140625,
"max": 29163.935546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.986719131469727,
"min": 0.24048037827014923,
"max": 12.986719131469727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2532.41015625,
"min": 46.653194427490234,
"max": 2627.28125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06984490175714467,
"min": 0.06323237041343578,
"max": 0.07433682511456187,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27937960702857867,
"min": 0.2529294816537431,
"max": 0.36437480283590656,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20490658049489938,
"min": 0.10011287998093071,
"max": 0.2718416042625904,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8196263219795975,
"min": 0.40045151992372285,
"max": 1.3307207644570107,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.795454545454547,
"min": 3.340909090909091,
"max": 25.927272727272726,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1135.0,
"min": 147.0,
"max": 1426.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.795454545454547,
"min": 3.340909090909091,
"max": 25.927272727272726,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1135.0,
"min": 147.0,
"max": 1426.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749157648",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749158206"
},
"total": 557.8184926380001,
"count": 1,
"self": 0.6538714429999573,
"children": {
"run_training.setup": {
"total": 0.02731553299997813,
"count": 1,
"self": 0.02731553299997813
},
"TrainerController.start_learning": {
"total": 557.1373056620001,
"count": 1,
"self": 0.5226525020012787,
"children": {
"TrainerController._reset_env": {
"total": 3.895893172000001,
"count": 1,
"self": 3.895893172000001
},
"TrainerController.advance": {
"total": 552.6013464939989,
"count": 18192,
"self": 0.5678610509916098,
"children": {
"env_step": {
"total": 397.888348916008,
"count": 18192,
"self": 304.08426041501195,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.47043198199424,
"count": 18192,
"self": 1.7831242449771025,
"children": {
"TorchPolicy.evaluate": {
"total": 91.68730773701714,
"count": 18192,
"self": 91.68730773701714
}
}
},
"workers": {
"total": 0.3336565190018064,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 555.218418264013,
"count": 18192,
"is_parallel": true,
"self": 291.15670017302784,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0064958839999462725,
"count": 1,
"is_parallel": true,
"self": 0.00478006100024686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017158229996994123,
"count": 10,
"is_parallel": true,
"self": 0.0017158229996994123
}
}
},
"UnityEnvironment.step": {
"total": 0.07032261099993775,
"count": 1,
"is_parallel": true,
"self": 0.000764896999839948,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040469099997153535,
"count": 1,
"is_parallel": true,
"self": 0.00040469099997153535
},
"communicator.exchange": {
"total": 0.0669497080000383,
"count": 1,
"is_parallel": true,
"self": 0.0669497080000383
},
"steps_from_proto": {
"total": 0.0022033150000879687,
"count": 1,
"is_parallel": true,
"self": 0.00042914200014365633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017741729999443123,
"count": 10,
"is_parallel": true,
"self": 0.0017741729999443123
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 264.0617180909852,
"count": 18191,
"is_parallel": true,
"self": 12.626080302975879,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.702000017003229,
"count": 18191,
"is_parallel": true,
"self": 6.702000017003229
},
"communicator.exchange": {
"total": 205.65008899798693,
"count": 18191,
"is_parallel": true,
"self": 205.65008899798693
},
"steps_from_proto": {
"total": 39.083548773019174,
"count": 18191,
"is_parallel": true,
"self": 7.3965862590437155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.68696251397546,
"count": 181910,
"is_parallel": true,
"self": 31.68696251397546
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 154.1451365269993,
"count": 18192,
"self": 0.6895044049954322,
"children": {
"process_trajectory": {
"total": 33.02882986600309,
"count": 18192,
"self": 32.54128960500316,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4875402609999355,
"count": 4,
"self": 0.4875402609999355
}
}
},
"_update_policy": {
"total": 120.42680225600077,
"count": 90,
"self": 48.738676088000716,
"children": {
"TorchPPOOptimizer.update": {
"total": 71.68812616800005,
"count": 4587,
"self": 71.68812616800005
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1859999631269602e-06,
"count": 1,
"self": 1.1859999631269602e-06
},
"TrainerController._save_models": {
"total": 0.11741230799998448,
"count": 1,
"self": 0.000987420999990718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11642488699999376,
"count": 1,
"self": 0.11642488699999376
}
}
}
}
}
}
}