{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4251229763031006,
"min": 0.4212987720966339,
"max": 1.4283965826034546,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12733.283203125,
"min": 12702.927734375,
"max": 43331.83984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989936.0,
"min": 29888.0,
"max": 989936.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989936.0,
"min": 29888.0,
"max": 989936.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4152663052082062,
"min": -0.08193006366491318,
"max": 0.5537070631980896,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 112.12190246582031,
"min": -19.827075958251953,
"max": 151.1620330810547,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0029726787470281124,
"min": -0.01859591342508793,
"max": 0.29031625390052795,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.8026232719421387,
"min": -4.8535332679748535,
"max": 69.96621704101562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06661175642059082,
"min": 0.06571279099064845,
"max": 0.07579420192396624,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9991763463088622,
"min": 0.6063536153917299,
"max": 1.047857467434369,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013982876283504692,
"min": 0.0010427224205250226,
"max": 0.01476541362602247,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20974314425257037,
"min": 0.010427224205250227,
"max": 0.20974314425257037,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.423997525366667e-06,
"min": 7.423997525366667e-06,
"max": 0.0002948472017176,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001113599628805,
"min": 0.0001113599628805,
"max": 0.0034916866361044997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247463333333333,
"min": 0.10247463333333333,
"max": 0.19828239999999997,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371195,
"min": 1.4778427,
"max": 2.5277988000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025721587000000003,
"min": 0.00025721587000000003,
"max": 0.00982841176,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00385823805,
"min": 0.00385823805,
"max": 0.11640316045,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012126035057008266,
"min": 0.011980419047176838,
"max": 0.5086643695831299,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18189053237438202,
"min": 0.16772586107254028,
"max": 4.069314956665039,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 435.7142857142857,
"min": 370.5903614457831,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30500.0,
"min": 16303.0,
"max": 33076.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4213856955724102,
"min": -0.9999500517733395,
"max": 1.5605190251732157,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.49699869006872,
"min": -31.998401656746864,
"max": 131.0835981145501,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4213856955724102,
"min": -0.9999500517733395,
"max": 1.5605190251732157,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.49699869006872,
"min": -31.998401656746864,
"max": 131.0835981145501,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05461933662275312,
"min": 0.047939407969395345,
"max": 10.057028827421805,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8233535635927183,
"min": 3.581199971784372,
"max": 170.9694900661707,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738821826",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738824213"
},
"total": 2387.00784104,
"count": 1,
"self": 0.6288574619993597,
"children": {
"run_training.setup": {
"total": 0.019915855000135707,
"count": 1,
"self": 0.019915855000135707
},
"TrainerController.start_learning": {
"total": 2386.3590677230004,
"count": 1,
"self": 1.5652479169643811,
"children": {
"TrainerController._reset_env": {
"total": 2.36656911099999,
"count": 1,
"self": 2.36656911099999
},
"TrainerController.advance": {
"total": 2382.3287705000357,
"count": 63684,
"self": 1.5806432520912495,
"children": {
"env_step": {
"total": 1634.9070544889987,
"count": 63684,
"self": 1465.4703608920167,
"children": {
"SubprocessEnvManager._take_step": {
"total": 168.5220572000685,
"count": 63684,
"self": 5.076025641085607,
"children": {
"TorchPolicy.evaluate": {
"total": 163.4460315589829,
"count": 62541,
"self": 163.4460315589829
}
}
},
"workers": {
"total": 0.9146363969134654,
"count": 63684,
"self": 0.0,
"children": {
"worker_root": {
"total": 2380.676942173904,
"count": 63684,
"is_parallel": true,
"self": 1039.8478337169254,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022113499999250052,
"count": 1,
"is_parallel": true,
"self": 0.0007019060003585764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015094439995664288,
"count": 8,
"is_parallel": true,
"self": 0.0015094439995664288
}
}
},
"UnityEnvironment.step": {
"total": 0.052378244000010454,
"count": 1,
"is_parallel": true,
"self": 0.0005259299998670031,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000618569000153002,
"count": 1,
"is_parallel": true,
"self": 0.000618569000153002
},
"communicator.exchange": {
"total": 0.04952347700009341,
"count": 1,
"is_parallel": true,
"self": 0.04952347700009341
},
"steps_from_proto": {
"total": 0.0017102679998970416,
"count": 1,
"is_parallel": true,
"self": 0.0003784660000292206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001331801999867821,
"count": 8,
"is_parallel": true,
"self": 0.001331801999867821
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1340.8291084569787,
"count": 63683,
"is_parallel": true,
"self": 34.2948741400553,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.549844133970282,
"count": 63683,
"is_parallel": true,
"self": 24.549844133970282
},
"communicator.exchange": {
"total": 1178.6418751799536,
"count": 63683,
"is_parallel": true,
"self": 1178.6418751799536
},
"steps_from_proto": {
"total": 103.34251500299956,
"count": 63683,
"is_parallel": true,
"self": 21.282767195044926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.05974780795464,
"count": 509464,
"is_parallel": true,
"self": 82.05974780795464
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 745.841072758946,
"count": 63684,
"self": 2.9820099889536777,
"children": {
"process_trajectory": {
"total": 137.5181512889926,
"count": 63684,
"self": 137.3023540579925,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21579723100012416,
"count": 2,
"self": 0.21579723100012416
}
}
},
"_update_policy": {
"total": 605.3409114809997,
"count": 453,
"self": 333.40858812795886,
"children": {
"TorchPPOOptimizer.update": {
"total": 271.93232335304083,
"count": 22785,
"self": 271.93232335304083
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0560002010606695e-06,
"count": 1,
"self": 1.0560002010606695e-06
},
"TrainerController._save_models": {
"total": 0.09847913900011918,
"count": 1,
"self": 0.0015750560005471925,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09690408299957198,
"count": 1,
"self": 0.09690408299957198
}
}
}
}
}
}
}