{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48590391874313354,
"min": 0.48590391874313354,
"max": 1.457601547241211,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14553.7939453125,
"min": 14553.7939453125,
"max": 44217.80078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4879007339477539,
"min": -0.10075342655181885,
"max": 0.4994926452636719,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.22109985351562,
"min": -24.28157615661621,
"max": 137.3604736328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02640671469271183,
"min": 0.004988966975361109,
"max": 0.3659305274486542,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.156219482421875,
"min": 1.2522307634353638,
"max": 86.72553253173828,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06950693395143996,
"min": 0.06564307843965433,
"max": 0.07502164592107564,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9730970753201594,
"min": 0.5251515214475295,
"max": 1.0410452815218982,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015755050522178255,
"min": 0.00017284758437055307,
"max": 0.015755050522178255,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22057070731049558,
"min": 0.00224701859681719,
"max": 0.22088682625326328,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.619390317378571e-06,
"min": 7.619390317378571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010667146444329999,
"min": 0.00010667146444329999,
"max": 0.0036329656890115,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253976428571429,
"min": 0.10253976428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355567,
"min": 1.3886848,
"max": 2.6109885,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026372245214285715,
"min": 0.00026372245214285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00369211433,
"min": 0.00369211433,
"max": 0.12111775114999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01213329378515482,
"min": 0.012119092978537083,
"max": 0.43788641691207886,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16986611485481262,
"min": 0.1696673035621643,
"max": 3.0652048587799072,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 374.97333333333336,
"min": 347.2857142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28123.0,
"min": 15984.0,
"max": 33512.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.568654039622964,
"min": -1.0000000521540642,
"max": 1.568654039622964,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 116.08039893209934,
"min": -31.994801685214043,
"max": 130.45939860492945,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.568654039622964,
"min": -1.0000000521540642,
"max": 1.568654039622964,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 116.08039893209934,
"min": -31.994801685214043,
"max": 130.45939860492945,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04688127742505261,
"min": 0.043723730707321974,
"max": 9.210717312991619,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.469214529453893,
"min": 3.469214529453893,
"max": 147.3714770078659,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739284742",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739287229"
},
"total": 2487.523988394,
"count": 1,
"self": 0.4844394380002086,
"children": {
"run_training.setup": {
"total": 0.024786961999780033,
"count": 1,
"self": 0.024786961999780033
},
"TrainerController.start_learning": {
"total": 2487.014761994,
"count": 1,
"self": 2.024960574077795,
"children": {
"TrainerController._reset_env": {
"total": 2.647554940000191,
"count": 1,
"self": 2.647554940000191
},
"TrainerController.advance": {
"total": 2482.2387613129217,
"count": 63649,
"self": 2.1107471620184697,
"children": {
"env_step": {
"total": 1720.3163650169859,
"count": 63649,
"self": 1513.5203735600658,
"children": {
"SubprocessEnvManager._take_step": {
"total": 205.53364215391866,
"count": 63649,
"self": 6.145917172907957,
"children": {
"TorchPolicy.evaluate": {
"total": 199.3877249810107,
"count": 62541,
"self": 199.3877249810107
}
}
},
"workers": {
"total": 1.2623493030014288,
"count": 63649,
"self": 0.0,
"children": {
"worker_root": {
"total": 2483.3618870609753,
"count": 63649,
"is_parallel": true,
"self": 1109.8475425129923,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023434750000888016,
"count": 1,
"is_parallel": true,
"self": 0.0008050639989960473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015384110010927543,
"count": 8,
"is_parallel": true,
"self": 0.0015384110010927543
}
}
},
"UnityEnvironment.step": {
"total": 0.048532271000112814,
"count": 1,
"is_parallel": true,
"self": 0.00042943100015691016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032007499976316467,
"count": 1,
"is_parallel": true,
"self": 0.00032007499976316467
},
"communicator.exchange": {
"total": 0.04627425200033031,
"count": 1,
"is_parallel": true,
"self": 0.04627425200033031
},
"steps_from_proto": {
"total": 0.0015085129998624325,
"count": 1,
"is_parallel": true,
"self": 0.0003384779997759324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011700350000865,
"count": 8,
"is_parallel": true,
"self": 0.0011700350000865
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1373.514344547983,
"count": 63648,
"is_parallel": true,
"self": 33.441422214954855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.663814775015,
"count": 63648,
"is_parallel": true,
"self": 22.663814775015
},
"communicator.exchange": {
"total": 1217.8894092500195,
"count": 63648,
"is_parallel": true,
"self": 1217.8894092500195
},
"steps_from_proto": {
"total": 99.51969830799362,
"count": 63648,
"is_parallel": true,
"self": 21.846926700062795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.67277160793083,
"count": 509184,
"is_parallel": true,
"self": 77.67277160793083
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 759.8116491339174,
"count": 63649,
"self": 3.8446903570170434,
"children": {
"process_trajectory": {
"total": 146.40354270690568,
"count": 63649,
"self": 146.1624913129058,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2410513939998964,
"count": 2,
"self": 0.2410513939998964
}
}
},
"_update_policy": {
"total": 609.5634160699947,
"count": 454,
"self": 328.12082110805613,
"children": {
"TorchPPOOptimizer.update": {
"total": 281.44259496193854,
"count": 22791,
"self": 281.44259496193854
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0280000424245372e-06,
"count": 1,
"self": 1.0280000424245372e-06
},
"TrainerController._save_models": {
"total": 0.103484139000102,
"count": 1,
"self": 0.0016448719998152228,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10183926700028678,
"count": 1,
"self": 0.10183926700028678
}
}
}
}
}
}
}