{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9251018762588501,
"min": 0.9251018762588501,
"max": 2.8640053272247314,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8792.16796875,
"min": 8792.16796875,
"max": 29235.765625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.143349647521973,
"min": 0.4893704056739807,
"max": 13.143349647521973,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2562.953125,
"min": 94.93785858154297,
"max": 2664.423828125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0689754861113233,
"min": 0.06468883513811947,
"max": 0.07412857223186851,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2759019444452932,
"min": 0.26427653851950544,
"max": 0.36953745265311544,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18644782360277923,
"min": 0.14551459030503883,
"max": 0.3142560801085304,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7457912944111169,
"min": 0.5820583612201553,
"max": 1.571280400542652,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.181818181818183,
"min": 4.295454545454546,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1152.0,
"min": 189.0,
"max": 1411.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.181818181818183,
"min": 4.295454545454546,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1152.0,
"min": 189.0,
"max": 1411.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756166958",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756167379"
},
"total": 421.06376749099996,
"count": 1,
"self": 0.43612421599999607,
"children": {
"run_training.setup": {
"total": 0.02643833799999129,
"count": 1,
"self": 0.02643833799999129
},
"TrainerController.start_learning": {
"total": 420.601204937,
"count": 1,
"self": 0.34201157699669693,
"children": {
"TrainerController._reset_env": {
"total": 3.1517561770000384,
"count": 1,
"self": 3.1517561770000384
},
"TrainerController.advance": {
"total": 417.0160653910032,
"count": 18192,
"self": 0.3562004700048078,
"children": {
"env_step": {
"total": 295.8988434629949,
"count": 18192,
"self": 228.91636873300507,
"children": {
"SubprocessEnvManager._take_step": {
"total": 66.78041471699368,
"count": 18192,
"self": 1.2248923619819152,
"children": {
"TorchPolicy.evaluate": {
"total": 65.55552235501176,
"count": 18192,
"self": 65.55552235501176
}
}
},
"workers": {
"total": 0.20206001299612808,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 419.2292588990064,
"count": 18192,
"is_parallel": true,
"self": 217.94010855100788,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008817243999999391,
"count": 1,
"is_parallel": true,
"self": 0.007266743999934988,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015505000000644031,
"count": 10,
"is_parallel": true,
"self": 0.0015505000000644031
}
}
},
"UnityEnvironment.step": {
"total": 0.03513774300000705,
"count": 1,
"is_parallel": true,
"self": 0.000564358000019638,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039191600001231564,
"count": 1,
"is_parallel": true,
"self": 0.00039191600001231564
},
"communicator.exchange": {
"total": 0.03231305499997461,
"count": 1,
"is_parallel": true,
"self": 0.03231305499997461
},
"steps_from_proto": {
"total": 0.0018684140000004845,
"count": 1,
"is_parallel": true,
"self": 0.0003550129999325691,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015134010000679154,
"count": 10,
"is_parallel": true,
"self": 0.0015134010000679154
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 201.28915034799854,
"count": 18191,
"is_parallel": true,
"self": 9.754624037000497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.26008878099924,
"count": 18191,
"is_parallel": true,
"self": 5.26008878099924
},
"communicator.exchange": {
"total": 155.5695847770006,
"count": 18191,
"is_parallel": true,
"self": 155.5695847770006
},
"steps_from_proto": {
"total": 30.704852752998192,
"count": 18191,
"is_parallel": true,
"self": 5.353876294035331,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.35097645896286,
"count": 181910,
"is_parallel": true,
"self": 25.35097645896286
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 120.76102145800354,
"count": 18192,
"self": 0.4114278340019837,
"children": {
"process_trajectory": {
"total": 26.436107604000995,
"count": 18192,
"self": 26.020500054000763,
"children": {
"RLTrainer._checkpoint": {
"total": 0.41560755000023164,
"count": 4,
"self": 0.41560755000023164
}
}
},
"_update_policy": {
"total": 93.91348602000056,
"count": 90,
"self": 39.089822258999334,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.82366376100123,
"count": 4587,
"self": 54.82366376100123
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.509999472356867e-07,
"count": 1,
"self": 9.509999472356867e-07
},
"TrainerController._save_models": {
"total": 0.09137084100007087,
"count": 1,
"self": 0.0008381800000734074,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09053266099999746,
"count": 1,
"self": 0.09053266099999746
}
}
}
}
}
}
}