{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4496605098247528,
"min": 0.44725897908210754,
"max": 1.4203987121582031,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13425.064453125,
"min": 13425.064453125,
"max": 43089.21484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989950.0,
"min": 29952.0,
"max": 989950.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989950.0,
"min": 29952.0,
"max": 989950.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4410683214664459,
"min": -0.09438677132129669,
"max": 0.44679006934165955,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 117.32417297363281,
"min": -22.747211456298828,
"max": 118.39936828613281,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02166244015097618,
"min": 0.004690803121775389,
"max": 0.3096609115600586,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.762208938598633,
"min": 1.257135272026062,
"max": 74.62828063964844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06712939155404456,
"min": 0.06310612193614645,
"max": 0.07426697016120751,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0069408733106684,
"min": 0.5198687911284525,
"max": 1.06502585002454,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014275552474686669,
"min": 9.64751464294081e-05,
"max": 0.015094648061551875,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21413328712030003,
"min": 0.0012541769035823052,
"max": 0.22641972092327814,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5295974901666655e-06,
"min": 7.5295974901666655e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011294396235249998,
"min": 0.00011294396235249998,
"max": 0.0036347959884013995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250983333333334,
"min": 0.10250983333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376475,
"min": 1.3886848,
"max": 2.6175044,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026073234999999997,
"min": 0.00026073234999999997,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039109852499999995,
"min": 0.0039109852499999995,
"max": 0.12117870013999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009830583818256855,
"min": 0.009830583818256855,
"max": 0.6006709933280945,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1474587619304657,
"min": 0.1401887834072113,
"max": 4.204697132110596,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 425.4428571428571,
"min": 401.7260273972603,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29781.0,
"min": 15984.0,
"max": 32503.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.488811404790197,
"min": -1.0000000521540642,
"max": 1.488811404790197,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.2167983353138,
"min": -31.989601641893387,
"max": 108.28219812363386,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.488811404790197,
"min": -1.0000000521540642,
"max": 1.488811404790197,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.2167983353138,
"min": -31.989601641893387,
"max": 108.28219812363386,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04392006372800097,
"min": 0.04392006372800097,
"max": 12.819432344287634,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.074404460960068,
"min": 3.02317702419532,
"max": 205.11091750860214,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748853013",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748855301"
},
"total": 2287.66375926,
"count": 1,
"self": 0.5262872150001385,
"children": {
"run_training.setup": {
"total": 0.020351246000245737,
"count": 1,
"self": 0.020351246000245737
},
"TrainerController.start_learning": {
"total": 2287.1171207989996,
"count": 1,
"self": 1.2866967519598802,
"children": {
"TrainerController._reset_env": {
"total": 2.8552870749999784,
"count": 1,
"self": 2.8552870749999784
},
"TrainerController.advance": {
"total": 2282.889447572039,
"count": 63695,
"self": 1.348595003979426,
"children": {
"env_step": {
"total": 1605.8923090979565,
"count": 63695,
"self": 1455.5528562839627,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.5763137209583,
"count": 63695,
"self": 4.602102556985756,
"children": {
"TorchPolicy.evaluate": {
"total": 144.97421116397254,
"count": 62568,
"self": 144.97421116397254
}
}
},
"workers": {
"total": 0.7631390930355337,
"count": 63695,
"self": 0.0,
"children": {
"worker_root": {
"total": 2282.079897979919,
"count": 63695,
"is_parallel": true,
"self": 938.1965698530444,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021469180001076893,
"count": 1,
"is_parallel": true,
"self": 0.0006848070006526541,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014621109994550352,
"count": 8,
"is_parallel": true,
"self": 0.0014621109994550352
}
}
},
"UnityEnvironment.step": {
"total": 0.07653444900006434,
"count": 1,
"is_parallel": true,
"self": 0.0005186480002521421,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004286069997760933,
"count": 1,
"is_parallel": true,
"self": 0.0004286069997760933
},
"communicator.exchange": {
"total": 0.07394713700023203,
"count": 1,
"is_parallel": true,
"self": 0.07394713700023203
},
"steps_from_proto": {
"total": 0.0016400569998040737,
"count": 1,
"is_parallel": true,
"self": 0.00033048399973267806,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013095730000713957,
"count": 8,
"is_parallel": true,
"self": 0.0013095730000713957
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1343.8833281268749,
"count": 63694,
"is_parallel": true,
"self": 31.657103102872497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.669921956041435,
"count": 63694,
"is_parallel": true,
"self": 23.669921956041435
},
"communicator.exchange": {
"total": 1191.9187643819187,
"count": 63694,
"is_parallel": true,
"self": 1191.9187643819187
},
"steps_from_proto": {
"total": 96.63753868604226,
"count": 63694,
"is_parallel": true,
"self": 19.133771818095283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.50376686794698,
"count": 509552,
"is_parallel": true,
"self": 77.50376686794698
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 675.648543470103,
"count": 63695,
"self": 2.5957560410602127,
"children": {
"process_trajectory": {
"total": 130.0042602910471,
"count": 63695,
"self": 129.74787680504733,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2563834859997769,
"count": 2,
"self": 0.2563834859997769
}
}
},
"_update_policy": {
"total": 543.0485271379957,
"count": 455,
"self": 305.3452910630117,
"children": {
"TorchPPOOptimizer.update": {
"total": 237.70323607498403,
"count": 22767,
"self": 237.70323607498403
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.940005500335246e-07,
"count": 1,
"self": 8.940005500335246e-07
},
"TrainerController._save_models": {
"total": 0.08568850600022415,
"count": 1,
"self": 0.0014467900000454392,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08424171600017871,
"count": 1,
"self": 0.08424171600017871
}
}
}
}
}
}
}