{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6007863879203796,
"min": 0.5869089961051941,
"max": 1.414193034172058,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17994.75390625,
"min": 17447.630859375,
"max": 42900.9609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.31948328018188477,
"min": -0.10557053983211517,
"max": 0.4195455014705658,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 81.7877197265625,
"min": -25.336929321289062,
"max": 112.43819427490234,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 2.248438596725464,
"min": -0.0685669332742691,
"max": 2.248438596725464,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 575.6002807617188,
"min": -18.375938415527344,
"max": 575.6002807617188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06410927527481579,
"min": 0.06410927527481579,
"max": 0.07322715137733334,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8975298538474211,
"min": 0.5033686440742452,
"max": 1.050122542716517,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.5087601175487395,
"min": 0.0006060078173445532,
"max": 0.5087601175487395,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 7.122641645682354,
"min": 0.008116679193518375,
"max": 7.122641645682354,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.63312602708571e-06,
"min": 7.63312602708571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010686376437919995,
"min": 0.00010686376437919995,
"max": 0.0036324841891719993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254434285714285,
"min": 0.10254434285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356208,
"min": 1.3886848,
"max": 2.6108280000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026417985142857133,
"min": 0.00026417985142857133,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036985179199999986,
"min": 0.0036985179199999986,
"max": 0.1211017172,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0115062166005373,
"min": 0.011263100430369377,
"max": 0.4828164577484131,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1610870361328125,
"min": 0.15768340229988098,
"max": 3.3797152042388916,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 539.2452830188679,
"min": 421.4117647058824,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28580.0,
"min": 15984.0,
"max": 32790.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1618038135079236,
"min": -1.0000000521540642,
"max": 1.4609117433428764,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 60.41379830241203,
"min": -29.972401678562164,
"max": 99.3419985473156,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1618038135079236,
"min": -1.0000000521540642,
"max": 1.4609117433428764,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 60.41379830241203,
"min": -29.972401678562164,
"max": 99.3419985473156,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06344578137101892,
"min": 0.05586810091121793,
"max": 10.330518838018179,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.299180631292984,
"min": 3.299180631292984,
"max": 165.28830140829086,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1767219606",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1767222950"
},
"total": 3344.27587093,
"count": 1,
"self": 0.815596027000538,
"children": {
"run_training.setup": {
"total": 0.04285213399998611,
"count": 1,
"self": 0.04285213399998611
},
"TrainerController.start_learning": {
"total": 3343.4174227689996,
"count": 1,
"self": 2.3496857651452956,
"children": {
"TrainerController._reset_env": {
"total": 3.245838208999885,
"count": 1,
"self": 3.245838208999885
},
"TrainerController.advance": {
"total": 3337.746532857855,
"count": 63419,
"self": 2.440177610862065,
"children": {
"env_step": {
"total": 2204.166210944998,
"count": 63419,
"self": 2033.6102120589553,
"children": {
"SubprocessEnvManager._take_step": {
"total": 169.04719281805365,
"count": 63419,
"self": 7.243923052082209,
"children": {
"TorchPolicy.evaluate": {
"total": 161.80326976597144,
"count": 62549,
"self": 161.80326976597144
}
}
},
"workers": {
"total": 1.5088060679890987,
"count": 63419,
"self": 0.0,
"children": {
"worker_root": {
"total": 3334.96605568387,
"count": 63419,
"is_parallel": true,
"self": 1489.853049374783,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0036992250002185756,
"count": 1,
"is_parallel": true,
"self": 0.0012899590005872597,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002409265999631316,
"count": 8,
"is_parallel": true,
"self": 0.002409265999631316
}
}
},
"UnityEnvironment.step": {
"total": 0.07831306799971571,
"count": 1,
"is_parallel": true,
"self": 0.0006744459997207741,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005545180001718109,
"count": 1,
"is_parallel": true,
"self": 0.0005545180001718109
},
"communicator.exchange": {
"total": 0.07480144499959351,
"count": 1,
"is_parallel": true,
"self": 0.07480144499959351
},
"steps_from_proto": {
"total": 0.0022826590002296143,
"count": 1,
"is_parallel": true,
"self": 0.0004378190001261828,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018448400001034315,
"count": 8,
"is_parallel": true,
"self": 0.0018448400001034315
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1845.1130063090868,
"count": 63418,
"is_parallel": true,
"self": 47.420577480834254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.60307305602737,
"count": 63418,
"is_parallel": true,
"self": 31.60307305602737
},
"communicator.exchange": {
"total": 1618.4006372940535,
"count": 63418,
"is_parallel": true,
"self": 1618.4006372940535
},
"steps_from_proto": {
"total": 147.68871847817172,
"count": 63418,
"is_parallel": true,
"self": 29.270431820247268,
"children": {
"_process_rank_one_or_two_observation": {
"total": 118.41828665792445,
"count": 507344,
"is_parallel": true,
"self": 118.41828665792445
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1131.140144301995,
"count": 63419,
"self": 4.426785048102374,
"children": {
"process_trajectory": {
"total": 166.52914554888457,
"count": 63419,
"self": 166.1153513688846,
"children": {
"RLTrainer._checkpoint": {
"total": 0.41379417999996804,
"count": 2,
"self": 0.41379417999996804
}
}
},
"_update_policy": {
"total": 960.184213705008,
"count": 451,
"self": 377.9726370689759,
"children": {
"TorchPPOOptimizer.update": {
"total": 582.2115766360321,
"count": 22803,
"self": 582.2115766360321
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0949997886200435e-06,
"count": 1,
"self": 1.0949997886200435e-06
},
"TrainerController._save_models": {
"total": 0.0753648419995443,
"count": 1,
"self": 0.001869173999693885,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07349566799985041,
"count": 1,
"self": 0.07349566799985041
}
}
}
}
}
}
}