{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40617018938064575,
"min": 0.369667112827301,
"max": 1.4152188301086426,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12146.11328125,
"min": 11190.912109375,
"max": 42932.078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.561475932598114,
"min": -0.09629026800394058,
"max": 0.561475932598114,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.2132568359375,
"min": -23.01337432861328,
"max": 157.2132568359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0005570047651417553,
"min": -0.002344713779166341,
"max": 0.3042084574699402,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.15596133470535278,
"min": -0.5885231494903564,
"max": 73.0100326538086,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06857021602595018,
"min": 0.06575287795192411,
"max": 0.07368616528109886,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0285532403892526,
"min": 0.5084936088054768,
"max": 1.0751288530203358,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015416699145377303,
"min": 0.00011032304136875945,
"max": 0.01607185751033415,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23125048718065955,
"min": 0.001213553455056354,
"max": 0.23125048718065955,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.442117519326664e-06,
"min": 7.442117519326664e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011163176278989996,
"min": 0.00011163176278989996,
"max": 0.003225551524816199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248067333333334,
"min": 0.10248067333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372101,
"min": 1.3691136000000002,
"max": 2.3172423,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000257819266,
"min": 0.000257819266,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038672889899999995,
"min": 0.0038672889899999995,
"max": 0.10753086162,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011612558737397194,
"min": 0.011612558737397194,
"max": 0.45031505823135376,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17418837547302246,
"min": 0.1709676831960678,
"max": 3.152205467224121,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.6741573033708,
"min": 345.109756097561,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30765.0,
"min": 15984.0,
"max": 32998.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5419325672843482,
"min": -1.0000000521540642,
"max": 1.581702418897937,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 137.231998488307,
"min": -32.000001668930054,
"max": 137.231998488307,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5419325672843482,
"min": -1.0000000521540642,
"max": 1.581702418897937,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 137.231998488307,
"min": -32.000001668930054,
"max": 137.231998488307,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04128233964745939,
"min": 0.04128233964745939,
"max": 8.767852799966931,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6741282286238857,
"min": 3.5938717161188833,
"max": 140.2856447994709,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678649200",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678651566"
},
"total": 2366.746772257,
"count": 1,
"self": 0.4869314289999238,
"children": {
"run_training.setup": {
"total": 0.11426994499998955,
"count": 1,
"self": 0.11426994499998955
},
"TrainerController.start_learning": {
"total": 2366.145570883,
"count": 1,
"self": 1.53678815404146,
"children": {
"TrainerController._reset_env": {
"total": 9.353404302000001,
"count": 1,
"self": 9.353404302000001
},
"TrainerController.advance": {
"total": 2355.158402126958,
"count": 63607,
"self": 1.6545828159732991,
"children": {
"env_step": {
"total": 1659.7987794269716,
"count": 63607,
"self": 1539.6224163379698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.24217019103133,
"count": 63607,
"self": 5.223228071022504,
"children": {
"TorchPolicy.evaluate": {
"total": 114.01894212000883,
"count": 62558,
"self": 114.01894212000883
}
}
},
"workers": {
"total": 0.9341928979704903,
"count": 63607,
"self": 0.0,
"children": {
"worker_root": {
"total": 2360.0372698160186,
"count": 63607,
"is_parallel": true,
"self": 951.9878399540337,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004720517999999174,
"count": 1,
"is_parallel": true,
"self": 0.003363345999957801,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013571720000413734,
"count": 8,
"is_parallel": true,
"self": 0.0013571720000413734
}
}
},
"UnityEnvironment.step": {
"total": 0.07745933600000399,
"count": 1,
"is_parallel": true,
"self": 0.0005737449999969613,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004900289999909546,
"count": 1,
"is_parallel": true,
"self": 0.0004900289999909546
},
"communicator.exchange": {
"total": 0.07455900000002202,
"count": 1,
"is_parallel": true,
"self": 0.07455900000002202
},
"steps_from_proto": {
"total": 0.0018365619999940463,
"count": 1,
"is_parallel": true,
"self": 0.0005418420000182778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012947199999757686,
"count": 8,
"is_parallel": true,
"self": 0.0012947199999757686
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1408.049429861985,
"count": 63606,
"is_parallel": true,
"self": 33.685560570938605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.02109227200259,
"count": 63606,
"is_parallel": true,
"self": 24.02109227200259
},
"communicator.exchange": {
"total": 1251.5753313090484,
"count": 63606,
"is_parallel": true,
"self": 1251.5753313090484
},
"steps_from_proto": {
"total": 98.76744570999529,
"count": 63606,
"is_parallel": true,
"self": 21.499778229007973,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.26766748098731,
"count": 508848,
"is_parallel": true,
"self": 77.26766748098731
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 693.7050398840136,
"count": 63607,
"self": 2.6994267890302126,
"children": {
"process_trajectory": {
"total": 130.30592685997834,
"count": 63607,
"self": 130.0844418909782,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22148496900013015,
"count": 2,
"self": 0.22148496900013015
}
}
},
"_update_policy": {
"total": 560.699686235005,
"count": 434,
"self": 357.8411404290075,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.85854580599744,
"count": 22836,
"self": 202.85854580599744
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.350001164420974e-07,
"count": 1,
"self": 9.350001164420974e-07
},
"TrainerController._save_models": {
"total": 0.09697536500016213,
"count": 1,
"self": 0.001510387000053015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09546497800010911,
"count": 1,
"self": 0.09546497800010911
}
}
}
}
}
}
}