{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3950866460800171,
"min": 0.39358752965927124,
"max": 1.524118423461914,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11947.419921875,
"min": 11631.298828125,
"max": 46235.65625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989950.0,
"min": 29952.0,
"max": 989950.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989950.0,
"min": 29952.0,
"max": 989950.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5323733687400818,
"min": -0.1285138875246048,
"max": 0.5324333906173706,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 149.59690856933594,
"min": -30.45779037475586,
"max": 149.59690856933594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.022907644510269165,
"min": -0.022907644510269165,
"max": 0.34239712357521057,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.437047958374023,
"min": -6.437047958374023,
"max": 81.14811706542969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06999578922794589,
"min": 0.0649318358551855,
"max": 0.0730513108329843,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9799410491912425,
"min": 0.4920466859402114,
"max": 1.077874961427833,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014691411485318004,
"min": 0.0006416591306066847,
"max": 0.014781234102667353,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20567976079445205,
"min": 0.0064165913060668465,
"max": 0.20693727743734294,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.687404580421424e-06,
"min": 7.687404580421424e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010762366412589993,
"min": 0.00010762366412589993,
"max": 0.003509054330315301,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025624357142857,
"min": 0.1025624357142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358740999999997,
"min": 1.3886848,
"max": 2.5696847,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002659873278571427,
"min": 0.0002659873278571427,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037238225899999984,
"min": 0.0037238225899999984,
"max": 0.11699150153000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011936063878238201,
"min": 0.011936063878238201,
"max": 0.3608056902885437,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16710489988327026,
"min": 0.16710489988327026,
"max": 2.525639772415161,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.7586206896552,
"min": 345.7586206896552,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30081.0,
"min": 15984.0,
"max": 33037.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6013545244932175,
"min": -1.0000000521540642,
"max": 1.6013545244932175,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 140.91919815540314,
"min": -30.316001623868942,
"max": 140.91919815540314,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6013545244932175,
"min": -1.0000000521540642,
"max": 1.6013545244932175,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 140.91919815540314,
"min": -30.316001623868942,
"max": 140.91919815540314,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04373795944758802,
"min": 0.04373795944758802,
"max": 7.66122658457607,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8489404313877458,
"min": 3.664707865988021,
"max": 122.57962535321712,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699317136",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/joshua/.unit5env/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1699318072"
},
"total": 936.2414754469937,
"count": 1,
"self": 0.2188617599895224,
"children": {
"run_training.setup": {
"total": 0.024026172002777457,
"count": 1,
"self": 0.024026172002777457
},
"TrainerController.start_learning": {
"total": 935.9985875150014,
"count": 1,
"self": 0.9277100825420348,
"children": {
"TrainerController._reset_env": {
"total": 0.8563371839991305,
"count": 1,
"self": 0.8563371839991305
},
"TrainerController.advance": {
"total": 934.1649900384655,
"count": 63789,
"self": 0.9036711433873279,
"children": {
"env_step": {
"total": 566.6767629271344,
"count": 63789,
"self": 480.7142410739616,
"children": {
"SubprocessEnvManager._take_step": {
"total": 85.37871667974105,
"count": 63789,
"self": 2.5857024942233693,
"children": {
"TorchPolicy.evaluate": {
"total": 82.79301418551768,
"count": 62570,
"self": 82.79301418551768
}
}
},
"workers": {
"total": 0.583805173431756,
"count": 63789,
"self": 0.0,
"children": {
"worker_root": {
"total": 934.8296124787885,
"count": 63789,
"is_parallel": true,
"self": 513.76958110962,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009138259920291603,
"count": 1,
"is_parallel": true,
"self": 0.0002673079870874062,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000646518004941754,
"count": 8,
"is_parallel": true,
"self": 0.000646518004941754
}
}
},
"UnityEnvironment.step": {
"total": 0.019890288007445633,
"count": 1,
"is_parallel": true,
"self": 0.00020827901607844979,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018747099966276437,
"count": 1,
"is_parallel": true,
"self": 0.00018747099966276437
},
"communicator.exchange": {
"total": 0.0188237569964258,
"count": 1,
"is_parallel": true,
"self": 0.0188237569964258
},
"steps_from_proto": {
"total": 0.0006707809952786192,
"count": 1,
"is_parallel": true,
"self": 0.000166817000717856,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005039639945607632,
"count": 8,
"is_parallel": true,
"self": 0.0005039639945607632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 421.0600313691684,
"count": 63788,
"is_parallel": true,
"self": 12.722350242445827,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.756323895970127,
"count": 63788,
"is_parallel": true,
"self": 9.756323895970127
},
"communicator.exchange": {
"total": 359.6099881291011,
"count": 63788,
"is_parallel": true,
"self": 359.6099881291011
},
"steps_from_proto": {
"total": 38.97136910165136,
"count": 63788,
"is_parallel": true,
"self": 9.196664849616354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.774704252035008,
"count": 510304,
"is_parallel": true,
"self": 29.774704252035008
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 366.58455596794374,
"count": 63789,
"self": 1.576518306901562,
"children": {
"process_trajectory": {
"total": 70.12976506890845,
"count": 63789,
"self": 70.0251674039173,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10459766499116085,
"count": 2,
"self": 0.10459766499116085
}
}
},
"_update_policy": {
"total": 294.8782725921337,
"count": 447,
"self": 167.20554342020478,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.67272917192895,
"count": 22821,
"self": 127.67272917192895
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.67001586407423e-07,
"count": 1,
"self": 6.67001586407423e-07
},
"TrainerController._save_models": {
"total": 0.0495495429931907,
"count": 1,
"self": 0.0007666759920539334,
"children": {
"RLTrainer._checkpoint": {
"total": 0.048782867001136765,
"count": 1,
"self": 0.048782867001136765
}
}
}
}
}
}
}