{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5119559168815613,
"min": 0.5034074187278748,
"max": 1.4569073915481567,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15391.443359375,
"min": 14965.294921875,
"max": 44196.7421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989962.0,
"min": 29952.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989962.0,
"min": 29952.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4965977966785431,
"min": -0.10757958143949509,
"max": 0.5024316310882568,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 135.57119750976562,
"min": -25.926679611206055,
"max": 138.168701171875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.14437216520309448,
"min": -0.05144139751791954,
"max": 0.2575911283493042,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 39.41360092163086,
"min": -14.043501853942871,
"max": 62.33705520629883,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06429459458592114,
"min": 0.06290810950304399,
"max": 0.0730116966252262,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.900124324202896,
"min": 0.49941473010805,
"max": 1.0316345880079705,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01575176536780342,
"min": 0.0006719232966448452,
"max": 0.016589767467285996,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2205247151492479,
"min": 0.0073911562630932974,
"max": 0.23225674454200393,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.292690426278571e-06,
"min": 7.292690426278571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010209766596789999,
"min": 0.00010209766596789999,
"max": 0.0032539844153385996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243086428571431,
"min": 0.10243086428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340321000000003,
"min": 1.3886848,
"max": 2.4016496,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025284334214285717,
"min": 0.00025284334214285717,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035398067900000007,
"min": 0.0035398067900000007,
"max": 0.10848767386000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006991219241172075,
"min": 0.006991219241172075,
"max": 0.430558443069458,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09787707030773163,
"min": 0.09787707030773163,
"max": 3.013909101486206,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 394.2987012987013,
"min": 344.4318181818182,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30361.0,
"min": 15984.0,
"max": 32663.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4757895884769303,
"min": -1.0000000521540642,
"max": 1.6100999810309573,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 113.63579831272364,
"min": -30.359001725912094,
"max": 141.68879833072424,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4757895884769303,
"min": -1.0000000521540642,
"max": 1.6100999810309573,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 113.63579831272364,
"min": -30.359001725912094,
"max": 141.68879833072424,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028597743684908243,
"min": 0.02780689784611406,
"max": 9.7458865782246,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2020262637379346,
"min": 2.2020262637379346,
"max": 155.9341852515936,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1761130414",
"python_version": "3.10.18 (main, Sep 2 2025, 14:19:37) [Clang 20.1.4 ]",
"command_line_arguments": "/home/kali/HFRLCourse/unit5/.venv/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1761131659"
},
"total": 1244.615810724,
"count": 1,
"self": 0.2694132350000018,
"children": {
"run_training.setup": {
"total": 0.013962421999963226,
"count": 1,
"self": 0.013962421999963226
},
"TrainerController.start_learning": {
"total": 1244.332435067,
"count": 1,
"self": 0.8393597730209876,
"children": {
"TrainerController._reset_env": {
"total": 4.279692654000087,
"count": 1,
"self": 4.279692654000087
},
"TrainerController.advance": {
"total": 1239.177523711979,
"count": 63612,
"self": 0.6828005569375364,
"children": {
"env_step": {
"total": 947.8981407069955,
"count": 63612,
"self": 898.6912184359851,
"children": {
"SubprocessEnvManager._take_step": {
"total": 48.66240158897756,
"count": 63612,
"self": 1.9695368339429251,
"children": {
"TorchPolicy.evaluate": {
"total": 46.692864755034634,
"count": 62554,
"self": 46.692864755034634
}
}
},
"workers": {
"total": 0.5445206820328394,
"count": 63612,
"self": 0.0,
"children": {
"worker_root": {
"total": 1242.009381177011,
"count": 63612,
"is_parallel": true,
"self": 396.22949405805855,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001819523999984085,
"count": 1,
"is_parallel": true,
"self": 0.0006689489998734643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011505750001106207,
"count": 8,
"is_parallel": true,
"self": 0.0011505750001106207
}
}
},
"UnityEnvironment.step": {
"total": 0.028025157000001855,
"count": 1,
"is_parallel": true,
"self": 0.0001569569999446685,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00015895100000307139,
"count": 1,
"is_parallel": true,
"self": 0.00015895100000307139
},
"communicator.exchange": {
"total": 0.026012844999968365,
"count": 1,
"is_parallel": true,
"self": 0.026012844999968365
},
"steps_from_proto": {
"total": 0.0016964040000857494,
"count": 1,
"is_parallel": true,
"self": 0.00014630800035320135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001550095999732548,
"count": 8,
"is_parallel": true,
"self": 0.001550095999732548
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 845.7798871189524,
"count": 63611,
"is_parallel": true,
"self": 9.647923194920963,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.677414510994026,
"count": 63611,
"is_parallel": true,
"self": 7.677414510994026
},
"communicator.exchange": {
"total": 798.7448140880049,
"count": 63611,
"is_parallel": true,
"self": 798.7448140880049
},
"steps_from_proto": {
"total": 29.70973532503251,
"count": 63611,
"is_parallel": true,
"self": 7.056422167005735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.653313158026776,
"count": 508888,
"is_parallel": true,
"self": 22.653313158026776
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 290.596582448046,
"count": 63612,
"self": 1.5125231610353467,
"children": {
"process_trajectory": {
"total": 55.1328491810076,
"count": 63612,
"self": 54.99991200400757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13293717700003072,
"count": 2,
"self": 0.13293717700003072
}
}
},
"_update_policy": {
"total": 233.95121010600303,
"count": 441,
"self": 120.07437019402391,
"children": {
"TorchPPOOptimizer.update": {
"total": 113.87683991197912,
"count": 22812,
"self": 113.87683991197912
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.610000582440989e-07,
"count": 1,
"self": 6.610000582440989e-07
},
"TrainerController._save_models": {
"total": 0.03585826699986683,
"count": 1,
"self": 0.0018302670000593935,
"children": {
"RLTrainer._checkpoint": {
"total": 0.034027999999807435,
"count": 1,
"self": 0.034027999999807435
}
}
}
}
}
}
}