{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.1471951007843018,
"min": 1.1471951007843018,
"max": 1.4732611179351807,
"count": 2
},
"Pyramids.Policy.Entropy.sum": {
"value": 34360.7890625,
"min": 34360.7890625,
"max": 44692.84765625,
"count": 2
},
"Pyramids.Step.mean": {
"value": 59929.0,
"min": 29952.0,
"max": 59929.0,
"count": 2
},
"Pyramids.Step.sum": {
"value": 59929.0,
"min": 29952.0,
"max": 59929.0,
"count": 2
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.13761763274669647,
"min": -0.19461220502853394,
"max": -0.13761763274669647,
"count": 2
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -33.16584777832031,
"min": -46.12309265136719,
"max": -33.16584777832031,
"count": 2
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2759995758533478,
"min": 0.22521837055683136,
"max": 0.2759995758533478,
"count": 2
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 66.51589965820312,
"min": 53.37675476074219,
"max": 66.51589965820312,
"count": 2
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07122987336505054,
"min": 0.07122987336505054,
"max": 0.07248519900959706,
"count": 2
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5698389869204044,
"min": 0.5073963930671794,
"max": 0.5698389869204044,
"count": 2
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.001456368120560539,
"min": 0.001456368120560539,
"max": 0.00627643768855523,
"count": 2
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.011650944964484312,
"min": 0.011650944964484312,
"max": 0.04393506381988661,
"count": 2
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.054445148520833e-05,
"min": 7.054445148520833e-05,
"max": 0.00021917716979809524,
"count": 2
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0005643556118816666,
"min": 0.0005643556118816666,
"max": 0.0015342401885866668,
"count": 2
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12351479166666668,
"min": 0.12351479166666668,
"max": 0.1730590476190476,
"count": 2
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.9881183333333334,
"min": 0.9881183333333334,
"max": 1.2114133333333332,
"count": 2
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0023591276875000004,
"min": 0.0023591276875000004,
"max": 0.007308598857142857,
"count": 2
},
"Pyramids.Policy.Beta.sum": {
"value": 0.018873021500000003,
"min": 0.018873021500000003,
"max": 0.051160192,
"count": 2
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.16494983434677124,
"min": 0.16494983434677124,
"max": 0.45264026522636414,
"count": 2
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.31959867477417,
"min": 1.31959867477417,
"max": 3.1684818267822266,
"count": 2
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 988.8484848484849,
"min": 988.8484848484849,
"max": 999.0,
"count": 2
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32632.0,
"min": 15984.0,
"max": 32632.0,
"count": 2
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9292121741807822,
"min": -1.0000000521540642,
"max": -0.9292121741807822,
"count": 2
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -30.664001747965813,
"min": -30.664001747965813,
"max": -16.000000834465027,
"count": 2
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9292121741807822,
"min": -1.0000000521540642,
"max": -0.9292121741807822,
"count": 2
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -30.664001747965813,
"min": -30.664001747965813,
"max": -16.000000834465027,
"count": 2
},
"Pyramids.Policy.RndReward.mean": {
"value": 2.2325969616120513,
"min": 2.2325969616120513,
"max": 9.47318115644157,
"count": 2
},
"Pyramids.Policy.RndReward.sum": {
"value": 73.67569973319769,
"min": 73.67569973319769,
"max": 151.5708985030651,
"count": 2
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690664701",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690664879"
},
"total": 177.1562953839998,
"count": 1,
"self": 0.5816066199995475,
"children": {
"run_training.setup": {
"total": 0.042408409000017855,
"count": 1,
"self": 0.042408409000017855
},
"TrainerController.start_learning": {
"total": 176.53228035500024,
"count": 1,
"self": 0.13696789698633438,
"children": {
"TrainerController._reset_env": {
"total": 1.245273707999786,
"count": 1,
"self": 1.245273707999786
},
"TrainerController.advance": {
"total": 175.02635205501429,
"count": 3769,
"self": 0.1308116690179304,
"children": {
"env_step": {
"total": 103.44430795000108,
"count": 3769,
"self": 94.5474765090039,
"children": {
"SubprocessEnvManager._take_step": {
"total": 8.815306453007452,
"count": 3769,
"self": 0.4142690809908345,
"children": {
"TorchPolicy.evaluate": {
"total": 8.401037372016617,
"count": 3768,
"self": 8.401037372016617
}
}
},
"workers": {
"total": 0.08152498798972374,
"count": 3769,
"self": 0.0,
"children": {
"worker_root": {
"total": 175.87849306599583,
"count": 3769,
"is_parallel": true,
"self": 91.02997063798011,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020641440000872535,
"count": 1,
"is_parallel": true,
"self": 0.0006874840000818949,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013766600000053586,
"count": 8,
"is_parallel": true,
"self": 0.0013766600000053586
}
}
},
"UnityEnvironment.step": {
"total": 0.05932117100019241,
"count": 1,
"is_parallel": true,
"self": 0.0006918000008226954,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003640339996309194,
"count": 1,
"is_parallel": true,
"self": 0.0003640339996309194
},
"communicator.exchange": {
"total": 0.05609222700013561,
"count": 1,
"is_parallel": true,
"self": 0.05609222700013561
},
"steps_from_proto": {
"total": 0.0021731099996031844,
"count": 1,
"is_parallel": true,
"self": 0.00045107399910193635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001722036000501248,
"count": 8,
"is_parallel": true,
"self": 0.001722036000501248
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 84.84852242801571,
"count": 3768,
"is_parallel": true,
"self": 2.744761659982032,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.4664704090018859,
"count": 3768,
"is_parallel": true,
"self": 1.4664704090018859
},
"communicator.exchange": {
"total": 72.62305305801192,
"count": 3768,
"is_parallel": true,
"self": 72.62305305801192
},
"steps_from_proto": {
"total": 8.014237301019875,
"count": 3768,
"is_parallel": true,
"self": 1.694154282010004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.320083019009871,
"count": 30144,
"is_parallel": true,
"self": 6.320083019009871
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 71.45123243599528,
"count": 3769,
"self": 0.17194651200634326,
"children": {
"process_trajectory": {
"total": 8.348048200987705,
"count": 3769,
"self": 8.348048200987705
},
"_update_policy": {
"total": 62.93123772300123,
"count": 15,
"self": 27.06944110000086,
"children": {
"TorchPPOOptimizer.update": {
"total": 35.86179662300037,
"count": 1365,
"self": 35.86179662300037
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0839999049494509e-06,
"count": 1,
"self": 1.0839999049494509e-06
},
"TrainerController._save_models": {
"total": 0.12368561099992803,
"count": 1,
"self": 0.0015742589998808398,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12211135200004719,
"count": 1,
"self": 0.12211135200004719
}
}
}
}
}
}
}