{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3648831248283386,
"min": 0.3648831248283386,
"max": 1.3712139129638672,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11010.712890625,
"min": 11010.712890625,
"max": 41597.14453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29901.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29901.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4961288571357727,
"min": -0.10742708295583725,
"max": 0.596433162689209,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 134.94705200195312,
"min": -25.88992691040039,
"max": 168.19415283203125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02636062540113926,
"min": -0.006849062629044056,
"max": 0.6467614769935608,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.170090198516846,
"min": -1.7876052856445312,
"max": 153.282470703125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06840785781017532,
"min": 0.065005086504381,
"max": 0.07815419535633132,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9577100093424545,
"min": 0.6026216409415772,
"max": 1.0485619737506462,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017396076677836016,
"min": 0.0006475950267519003,
"max": 0.01793326485563912,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24354507348970422,
"min": 0.007771140321022804,
"max": 0.25106570797894767,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.198233314907142e-06,
"min": 7.198233314907142e-06,
"max": 0.0002948457392180875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010077526640869999,
"min": 0.00010077526640869999,
"max": 0.0033827867724044998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10239937857142857,
"min": 0.10239937857142857,
"max": 0.19828191250000002,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4335913,
"min": 1.4335913,
"max": 2.5275955,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002496979192857143,
"min": 0.0002496979192857143,
"max": 0.00982836305875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00349577087,
"min": 0.00349577087,
"max": 0.11278679044999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012025290168821812,
"min": 0.012025290168821812,
"max": 0.5156056880950928,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1683540642261505,
"min": 0.1683540642261505,
"max": 4.124845504760742,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 353.6279069767442,
"min": 317.1685393258427,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30412.0,
"min": 16444.0,
"max": 32559.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5765813678329768,
"min": -0.9999750521965325,
"max": 1.6154022313905565,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 135.585997633636,
"min": -31.99920167028904,
"max": 143.77079859375954,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5765813678329768,
"min": -0.9999750521965325,
"max": 1.6154022313905565,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 135.585997633636,
"min": -31.99920167028904,
"max": 143.77079859375954,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04396782970882689,
"min": 0.042818832990569926,
"max": 11.05958467020708,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.781233354959113,
"min": 3.682419637189014,
"max": 188.01293939352036,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749478125",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749480568"
},
"total": 2442.331029139,
"count": 1,
"self": 0.5475853719999577,
"children": {
"run_training.setup": {
"total": 0.028844751999940854,
"count": 1,
"self": 0.028844751999940854
},
"TrainerController.start_learning": {
"total": 2441.754599015,
"count": 1,
"self": 1.74587964797729,
"children": {
"TrainerController._reset_env": {
"total": 2.391739803000064,
"count": 1,
"self": 2.391739803000064
},
"TrainerController.advance": {
"total": 2437.5309200950232,
"count": 63984,
"self": 1.8157684171355868,
"children": {
"env_step": {
"total": 1733.6120389379864,
"count": 63984,
"self": 1555.703864930917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 176.9379571510433,
"count": 63984,
"self": 5.197221004036464,
"children": {
"TorchPolicy.evaluate": {
"total": 171.74073614700683,
"count": 62562,
"self": 171.74073614700683
}
}
},
"workers": {
"total": 0.9702168560261271,
"count": 63984,
"self": 0.0,
"children": {
"worker_root": {
"total": 2436.128624947966,
"count": 63984,
"is_parallel": true,
"self": 1008.4336283559812,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019348039998021704,
"count": 1,
"is_parallel": true,
"self": 0.000657402999650003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012774010001521674,
"count": 8,
"is_parallel": true,
"self": 0.0012774010001521674
}
}
},
"UnityEnvironment.step": {
"total": 0.04673753699989902,
"count": 1,
"is_parallel": true,
"self": 0.0005618779998712853,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046319400007632794,
"count": 1,
"is_parallel": true,
"self": 0.00046319400007632794
},
"communicator.exchange": {
"total": 0.043995249999852604,
"count": 1,
"is_parallel": true,
"self": 0.043995249999852604
},
"steps_from_proto": {
"total": 0.001717215000098804,
"count": 1,
"is_parallel": true,
"self": 0.00043865700058631774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012785579995124863,
"count": 8,
"is_parallel": true,
"self": 0.0012785579995124863
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1427.6949965919846,
"count": 63983,
"is_parallel": true,
"self": 33.77837855200073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.573026747009862,
"count": 63983,
"is_parallel": true,
"self": 24.573026747009862
},
"communicator.exchange": {
"total": 1265.8699339379805,
"count": 63983,
"is_parallel": true,
"self": 1265.8699339379805
},
"steps_from_proto": {
"total": 103.47365735499352,
"count": 63983,
"is_parallel": true,
"self": 21.93969164510986,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.53396570988366,
"count": 511864,
"is_parallel": true,
"self": 81.53396570988366
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 702.1031127399012,
"count": 63984,
"self": 3.1575613799163875,
"children": {
"process_trajectory": {
"total": 136.9946915919877,
"count": 63984,
"self": 136.78829833498776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20639325699994515,
"count": 2,
"self": 0.20639325699994515
}
}
},
"_update_policy": {
"total": 561.9508597679971,
"count": 448,
"self": 310.7162987520826,
"children": {
"TorchPPOOptimizer.update": {
"total": 251.23456101591455,
"count": 22779,
"self": 251.23456101591455
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0949997886200435e-06,
"count": 1,
"self": 1.0949997886200435e-06
},
"TrainerController._save_models": {
"total": 0.08605837400000382,
"count": 1,
"self": 0.0014864859999761393,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08457188800002768,
"count": 1,
"self": 0.08457188800002768
}
}
}
}
}
}
}