{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4061567187309265,
"min": 0.4061567187309265,
"max": 0.9414515495300293,
"count": 31
},
"Pyramids.Policy.Entropy.sum": {
"value": 12184.701171875,
"min": 11500.4296875,
"max": 28348.98828125,
"count": 31
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 89958.0,
"max": 989945.0,
"count": 31
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 89958.0,
"max": 989945.0,
"count": 31
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.573209285736084,
"min": -0.1336812674999237,
"max": 0.6449949741363525,
"count": 31
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.07180786132812,
"min": -25.01500701904297,
"max": 183.17857360839844,
"count": 31
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025620093569159508,
"min": -0.005597613286226988,
"max": 0.21868665516376495,
"count": 31
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.199246406555176,
"min": -1.405000925064087,
"max": 35.156524658203125,
"count": 31
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848251701324314,
"min": 0.06463703170946726,
"max": 0.07310043074482797,
"count": 31
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9587552381854039,
"min": 0.2114214927520758,
"max": 1.0689573684004228,
"count": 31
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012869189715101604,
"min": 0.00025569497228945755,
"max": 0.016525093135895706,
"count": 31
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18016865601142246,
"min": 0.001568325132211612,
"max": 0.2478763970384356,
"count": 31
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.724754567971431e-06,
"min": 7.724754567971431e-06,
"max": 0.0002753238082254,
"count": 31
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010814656395160002,
"min": 0.00010814656395160002,
"max": 0.0032572898142367995,
"count": 31
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257488571428572,
"min": 0.10257488571428572,
"max": 0.19177460000000002,
"count": 31
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360484,
"min": 0.5753238,
"max": 2.4849949999999996,
"count": 31
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002672310828571429,
"min": 0.0002672310828571429,
"max": 0.00917828254,
"count": 31
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037412351600000005,
"min": 0.0037412351600000005,
"max": 0.10859774367999998,
"count": 31
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016097713261842728,
"min": 0.015533889643847942,
"max": 0.1160392090678215,
"count": 31
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2253679782152176,
"min": 0.21747446060180664,
"max": 0.7664591073989868,
"count": 31
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 342.3448275862069,
"min": 290.90291262135923,
"max": 999.0,
"count": 31
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29784.0,
"min": 15984.0,
"max": 33870.0,
"count": 31
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.660363620147109,
"min": -1.0000000521540642,
"max": 1.7088557514720237,
"count": 31
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 146.1119985729456,
"min": -28.99360167235136,
"max": 177.72099815309048,
"count": 31
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.660363620147109,
"min": -1.0000000521540642,
"max": 1.7088557514720237,
"count": 31
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 146.1119985729456,
"min": -28.99360167235136,
"max": 177.72099815309048,
"count": 31
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05615975286458375,
"min": 0.04664962318803681,
"max": 1.1974169085423152,
"count": 31
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.9420582520833705,
"min": 4.76479971196386,
"max": 27.701961785554886,
"count": 31
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 31
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 31
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754093653",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1754095721"
},
"total": 2067.2343767899997,
"count": 1,
"self": 0.47621682399994825,
"children": {
"run_training.setup": {
"total": 0.0201415490000727,
"count": 1,
"self": 0.0201415490000727
},
"TrainerController.start_learning": {
"total": 2066.7380184169997,
"count": 1,
"self": 1.1062399109459875,
"children": {
"TrainerController._reset_env": {
"total": 3.2342150989998117,
"count": 1,
"self": 3.2342150989998117
},
"TrainerController.advance": {
"total": 2062.3155083260535,
"count": 59274,
"self": 1.1666737150062545,
"children": {
"env_step": {
"total": 1441.4700920499713,
"count": 59274,
"self": 1310.3316942368765,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.4779282991085,
"count": 59274,
"self": 4.0256696371484395,
"children": {
"TorchPolicy.evaluate": {
"total": 126.45225866196006,
"count": 57941,
"self": 126.45225866196006
}
}
},
"workers": {
"total": 0.6604695139862997,
"count": 59274,
"self": 0.0,
"children": {
"worker_root": {
"total": 2062.5832110079286,
"count": 59274,
"is_parallel": true,
"self": 854.1469011739946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023211590000755677,
"count": 1,
"is_parallel": true,
"self": 0.0006634699998357974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016576890002397704,
"count": 8,
"is_parallel": true,
"self": 0.0016576890002397704
}
}
},
"UnityEnvironment.step": {
"total": 0.07986181199976272,
"count": 1,
"is_parallel": true,
"self": 0.0005444439993880223,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004716150001513597,
"count": 1,
"is_parallel": true,
"self": 0.0004716150001513597
},
"communicator.exchange": {
"total": 0.07729110900027081,
"count": 1,
"is_parallel": true,
"self": 0.07729110900027081
},
"steps_from_proto": {
"total": 0.0015546439999525319,
"count": 1,
"is_parallel": true,
"self": 0.0003257639996263606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012288800003261713,
"count": 8,
"is_parallel": true,
"self": 0.0012288800003261713
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1208.436309833934,
"count": 59273,
"is_parallel": true,
"self": 28.689499067837005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.310085046074164,
"count": 59273,
"is_parallel": true,
"self": 20.310085046074164
},
"communicator.exchange": {
"total": 1074.967391628927,
"count": 59273,
"is_parallel": true,
"self": 1074.967391628927
},
"steps_from_proto": {
"total": 84.46933409109579,
"count": 59273,
"is_parallel": true,
"self": 16.39969484661424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.06963924448155,
"count": 474184,
"is_parallel": true,
"self": 68.06963924448155
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 619.678742561076,
"count": 59274,
"self": 2.1306784271464494,
"children": {
"process_trajectory": {
"total": 116.52588908893904,
"count": 59274,
"self": 116.32646997693882,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19941911200021423,
"count": 2,
"self": 0.19941911200021423
}
}
},
"_update_policy": {
"total": 501.0221750449905,
"count": 419,
"self": 280.73122939198856,
"children": {
"TorchPPOOptimizer.update": {
"total": 220.29094565300193,
"count": 21159,
"self": 220.29094565300193
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.950000847107731e-07,
"count": 1,
"self": 8.950000847107731e-07
},
"TrainerController._save_models": {
"total": 0.0820541860002777,
"count": 1,
"self": 0.0016340140000465908,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0804201720002311,
"count": 1,
"self": 0.0804201720002311
}
}
}
}
}
}
}