{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41606563329696655,
"min": 0.4107342064380646,
"max": 1.525533676147461,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12315.54296875,
"min": 12315.54296875,
"max": 46278.58984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989900.0,
"min": 29952.0,
"max": 989900.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989900.0,
"min": 29952.0,
"max": 989900.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4332033395767212,
"min": -0.10359484702348709,
"max": 0.5029125809669495,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 114.36568450927734,
"min": -24.862762451171875,
"max": 137.29513549804688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.009853750467300415,
"min": -0.009853750467300415,
"max": 0.1680927276611328,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.6013901233673096,
"min": -2.6779074668884277,
"max": 40.342254638671875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06460485512500626,
"min": 0.06460485512500626,
"max": 0.0738383179497987,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9044679717500876,
"min": 0.4917989045538105,
"max": 1.0388535228557882,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013890681896241822,
"min": 0.0003874696088909709,
"max": 0.014395927511564365,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19446954654738552,
"min": 0.00464963530669165,
"max": 0.2015429851619011,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.488997503700001e-06,
"min": 7.488997503700001e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010484596505180002,
"min": 0.00010484596505180002,
"max": 0.00302049029317,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249630000000001,
"min": 0.10249630000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349482000000002,
"min": 1.3691136000000002,
"max": 2.4010450000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025938037000000007,
"min": 0.00025938037000000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003631325180000001,
"min": 0.003631325180000001,
"max": 0.100712317,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01042268704622984,
"min": 0.01042268704622984,
"max": 0.26630768179893494,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1459176242351532,
"min": 0.1459176242351532,
"max": 1.8641538619995117,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 426.2686567164179,
"min": 381.1463414634146,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28560.0,
"min": 15984.0,
"max": 35241.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.484164162803052,
"min": -1.0000000521540642,
"max": 1.5897486292429872,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.43899890780449,
"min": -32.000001668930054,
"max": 123.98719765990973,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.484164162803052,
"min": -1.0000000521540642,
"max": 1.5897486292429872,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.43899890780449,
"min": -32.000001668930054,
"max": 123.98719765990973,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04570462418383043,
"min": 0.043426890079077914,
"max": 5.317265393212438,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.062209820316639,
"min": 3.062209820316639,
"max": 85.076246291399,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701542797",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701545042"
},
"total": 2244.995034052,
"count": 1,
"self": 1.2227976330000274,
"children": {
"run_training.setup": {
"total": 0.07848659699993732,
"count": 1,
"self": 0.07848659699993732
},
"TrainerController.start_learning": {
"total": 2243.693749822,
"count": 1,
"self": 1.5371466909141418,
"children": {
"TrainerController._reset_env": {
"total": 3.3263148340001862,
"count": 1,
"self": 3.3263148340001862
},
"TrainerController.advance": {
"total": 2238.7097226040855,
"count": 63565,
"self": 1.5854743721906743,
"children": {
"env_step": {
"total": 1584.7070558309422,
"count": 63565,
"self": 1439.423677076966,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.3081013049691,
"count": 63565,
"self": 5.173911401970372,
"children": {
"TorchPolicy.evaluate": {
"total": 139.13418990299874,
"count": 62563,
"self": 139.13418990299874
}
}
},
"workers": {
"total": 0.9752774490070806,
"count": 63565,
"self": 0.0,
"children": {
"worker_root": {
"total": 2238.5899523400108,
"count": 63565,
"is_parallel": true,
"self": 928.2958425600084,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025737309999840363,
"count": 1,
"is_parallel": true,
"self": 0.0007755119997909787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017982190001930576,
"count": 8,
"is_parallel": true,
"self": 0.0017982190001930576
}
}
},
"UnityEnvironment.step": {
"total": 0.047045125000067856,
"count": 1,
"is_parallel": true,
"self": 0.0006567120001363946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004884980000952055,
"count": 1,
"is_parallel": true,
"self": 0.0004884980000952055
},
"communicator.exchange": {
"total": 0.04408489499996904,
"count": 1,
"is_parallel": true,
"self": 0.04408489499996904
},
"steps_from_proto": {
"total": 0.0018150199998672178,
"count": 1,
"is_parallel": true,
"self": 0.000371745999700579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014432740001666389,
"count": 8,
"is_parallel": true,
"self": 0.0014432740001666389
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1310.2941097800024,
"count": 63564,
"is_parallel": true,
"self": 35.682327567098355,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.521036465934458,
"count": 63564,
"is_parallel": true,
"self": 26.521036465934458
},
"communicator.exchange": {
"total": 1142.3003088319863,
"count": 63564,
"is_parallel": true,
"self": 1142.3003088319863
},
"steps_from_proto": {
"total": 105.79043691498327,
"count": 63564,
"is_parallel": true,
"self": 21.804844262037022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.98559265294625,
"count": 508512,
"is_parallel": true,
"self": 83.98559265294625
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 652.4171924009524,
"count": 63565,
"self": 2.859226858943657,
"children": {
"process_trajectory": {
"total": 133.40946616101337,
"count": 63565,
"self": 133.2024242170137,
"children": {
"RLTrainer._checkpoint": {
"total": 0.207041943999684,
"count": 2,
"self": 0.207041943999684
}
}
},
"_update_policy": {
"total": 516.1484993809954,
"count": 434,
"self": 307.6682184119802,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.4802809690152,
"count": 22842,
"self": 208.4802809690152
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3330000001587905e-06,
"count": 1,
"self": 1.3330000001587905e-06
},
"TrainerController._save_models": {
"total": 0.12056436000011672,
"count": 1,
"self": 0.0020359590002954064,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11852840099982132,
"count": 1,
"self": 0.11852840099982132
}
}
}
}
}
}
}