{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5054389834403992,
"min": 0.5054389834403992,
"max": 1.4887460470199585,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15195.517578125,
"min": 15195.517578125,
"max": 45162.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989872.0,
"min": 29929.0,
"max": 989872.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989872.0,
"min": 29929.0,
"max": 989872.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4794197678565979,
"min": -0.0907188355922699,
"max": 0.5055550932884216,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 129.92276000976562,
"min": -22.044677734375,
"max": 142.5665283203125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012201234698295593,
"min": -0.016036972403526306,
"max": 0.39961737394332886,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.3065345287323,
"min": -4.522426128387451,
"max": 94.70932006835938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06930266874542339,
"min": 0.06397790551894535,
"max": 0.07379962227978033,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9702373624359275,
"min": 0.47672827167291965,
"max": 1.0549287049062137,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016435038556389808,
"min": 0.0011488044866088264,
"max": 0.018728145837182332,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23009053978945732,
"min": 0.011488044866088263,
"max": 0.26219404172055266,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.207147597649999e-06,
"min": 7.207147597649999e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001009000663671,
"min": 0.0001009000663671,
"max": 0.003504933531688899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10240235,
"min": 0.10240235,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4336329,
"min": 1.3886848,
"max": 2.5683111,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00024999476500000005,
"min": 0.00024999476500000005,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0034999267100000006,
"min": 0.0034999267100000006,
"max": 0.11685427889,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009728473611176014,
"min": 0.008786574006080627,
"max": 0.41023382544517517,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13619862496852875,
"min": 0.12301203608512878,
"max": 2.8716368675231934,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 389.2763157894737,
"min": 355.6235294117647,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29585.0,
"min": 16856.0,
"max": 32922.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5366986804194265,
"min": -1.0000000521540642,
"max": 1.6131835092516507,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 118.32579839229584,
"min": -31.00000161677599,
"max": 137.1205982863903,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5366986804194265,
"min": -1.0000000521540642,
"max": 1.6131835092516507,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 118.32579839229584,
"min": -31.00000161677599,
"max": 137.1205982863903,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.039290871180128306,
"min": 0.03595578232125263,
"max": 7.917780039065025,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0253970808698796,
"min": 2.792343703637016,
"max": 134.60226066410542,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703424379",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703426546"
},
"total": 2167.447547816,
"count": 1,
"self": 0.4756289619999734,
"children": {
"run_training.setup": {
"total": 0.06844011700002284,
"count": 1,
"self": 0.06844011700002284
},
"TrainerController.start_learning": {
"total": 2166.903478737,
"count": 1,
"self": 1.2243080470434506,
"children": {
"TrainerController._reset_env": {
"total": 2.4276302289999876,
"count": 1,
"self": 2.4276302289999876
},
"TrainerController.advance": {
"total": 2163.1704864899566,
"count": 63746,
"self": 1.3060747380004614,
"children": {
"env_step": {
"total": 1529.225074084939,
"count": 63746,
"self": 1406.4002907918712,
"children": {
"SubprocessEnvManager._take_step": {
"total": 122.06248050399586,
"count": 63746,
"self": 4.57470943201929,
"children": {
"TorchPolicy.evaluate": {
"total": 117.48777107197657,
"count": 62573,
"self": 117.48777107197657
}
}
},
"workers": {
"total": 0.7623027890720095,
"count": 63746,
"self": 0.0,
"children": {
"worker_root": {
"total": 2162.1696823290335,
"count": 63746,
"is_parallel": true,
"self": 870.9166657920396,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017348989999845799,
"count": 1,
"is_parallel": true,
"self": 0.0005409890002283646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011939099997562153,
"count": 8,
"is_parallel": true,
"self": 0.0011939099997562153
}
}
},
"UnityEnvironment.step": {
"total": 0.05043402900003002,
"count": 1,
"is_parallel": true,
"self": 0.0006527610001967332,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004954369999268238,
"count": 1,
"is_parallel": true,
"self": 0.0004954369999268238
},
"communicator.exchange": {
"total": 0.047692593999954624,
"count": 1,
"is_parallel": true,
"self": 0.047692593999954624
},
"steps_from_proto": {
"total": 0.0015932369999518414,
"count": 1,
"is_parallel": true,
"self": 0.00033554999993157253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001257687000020269,
"count": 8,
"is_parallel": true,
"self": 0.001257687000020269
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.253016536994,
"count": 63745,
"is_parallel": true,
"self": 34.2750190349409,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.4900794400246,
"count": 63745,
"is_parallel": true,
"self": 23.4900794400246
},
"communicator.exchange": {
"total": 1137.5250352120156,
"count": 63745,
"is_parallel": true,
"self": 1137.5250352120156
},
"steps_from_proto": {
"total": 95.96288285001287,
"count": 63745,
"is_parallel": true,
"self": 18.650780166125855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.31210268388702,
"count": 509960,
"is_parallel": true,
"self": 77.31210268388702
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 632.6393376670172,
"count": 63746,
"self": 2.453787488015678,
"children": {
"process_trajectory": {
"total": 125.64570959400226,
"count": 63746,
"self": 125.46035070100243,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18535889299982955,
"count": 2,
"self": 0.18535889299982955
}
}
},
"_update_policy": {
"total": 504.5398405849993,
"count": 455,
"self": 300.74700132896544,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.79283925603386,
"count": 22797,
"self": 203.79283925603386
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0390003808424808e-06,
"count": 1,
"self": 1.0390003808424808e-06
},
"TrainerController._save_models": {
"total": 0.0810529319996931,
"count": 1,
"self": 0.0016387309997298871,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07941420099996321,
"count": 1,
"self": 0.07941420099996321
}
}
}
}
}
}
}