{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1272134929895401,
"min": 0.1272134929895401,
"max": 1.3262442350387573,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 3810.29833984375,
"min": 3810.29833984375,
"max": 40232.9453125,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479984.0,
"min": 29985.0,
"max": 479984.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479984.0,
"min": 29985.0,
"max": 479984.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07193244993686676,
"min": -0.09850554913282394,
"max": -0.016926417127251625,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -17.33572006225586,
"min": -23.838342666625977,
"max": -4.028487205505371,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.17792682349681854,
"min": 0.17224307358264923,
"max": 0.6509669423103333,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 42.88036346435547,
"min": 41.682823181152344,
"max": 154.9301300048828,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06857857384182693,
"min": 0.06573357961227373,
"max": 0.07641168728673359,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9601000337855771,
"min": 0.6112934982938687,
"max": 0.9670511001941935,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0009419295516526155,
"min": 0.0009419295516526155,
"max": 0.01696418479152543,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.013187013723136617,
"min": 0.012759273213140684,
"max": 0.13571347833220343,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.1178892940400006e-05,
"min": 2.1178892940400006e-05,
"max": 0.00028952452849182494,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002965045011656001,
"min": 0.0002965045011656001,
"max": 0.0030623521792159996,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1070596,
"min": 0.1070596,
"max": 0.196508175,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4988344,
"min": 1.4988344,
"max": 2.2544892000000005,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007152540400000001,
"min": 0.0007152540400000001,
"max": 0.0096511666825,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.010013556560000001,
"min": 0.010013556560000001,
"max": 0.1020963216,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.17545127868652344,
"min": 0.16994965076446533,
"max": 0.6004651188850403,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 2.456317901611328,
"min": 2.221611976623535,
"max": 4.803720951080322,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 970.6363636363636,
"min": 835.6785714285714,
"max": 990.1935483870968,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32031.0,
"min": 17551.0,
"max": 32894.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.7893939873937404,
"min": -0.927812172607942,
"max": -0.19328576140105724,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.050001583993435,
"min": -30.617801696062088,
"max": -5.412001319229603,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.7893939873937404,
"min": -0.927812172607942,
"max": -0.19328576140105724,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.050001583993435,
"min": -30.617801696062088,
"max": -5.412001319229603,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.6776832059928866,
"min": 1.547971571329981,
"max": 13.12600251701143,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 55.363545797765255,
"min": 43.34320399723947,
"max": 236.26804530620575,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742206343",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742207496"
},
"total": 1152.8515827570002,
"count": 1,
"self": 0.528543523000053,
"children": {
"run_training.setup": {
"total": 0.0231692079998993,
"count": 1,
"self": 0.0231692079998993
},
"TrainerController.start_learning": {
"total": 1152.2998700260002,
"count": 1,
"self": 0.8183597809761523,
"children": {
"TrainerController._reset_env": {
"total": 2.4161718329999076,
"count": 1,
"self": 2.4161718329999076
},
"TrainerController.advance": {
"total": 1148.9599393170242,
"count": 31526,
"self": 0.8572265440759566,
"children": {
"env_step": {
"total": 769.935642186965,
"count": 31526,
"self": 674.8972606359789,
"children": {
"SubprocessEnvManager._take_step": {
"total": 94.5065609099845,
"count": 31526,
"self": 2.7934232269929,
"children": {
"TorchPolicy.evaluate": {
"total": 91.7131376829916,
"count": 31314,
"self": 91.7131376829916
}
}
},
"workers": {
"total": 0.5318206410015591,
"count": 31526,
"self": 0.0,
"children": {
"worker_root": {
"total": 1149.2957857769727,
"count": 31526,
"is_parallel": true,
"self": 541.5865705459908,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002257364000115558,
"count": 1,
"is_parallel": true,
"self": 0.0008444520003649814,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014129119997505768,
"count": 8,
"is_parallel": true,
"self": 0.0014129119997505768
}
}
},
"UnityEnvironment.step": {
"total": 0.0573951549999947,
"count": 1,
"is_parallel": true,
"self": 0.0006599119999464165,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048608700012664485,
"count": 1,
"is_parallel": true,
"self": 0.00048608700012664485
},
"communicator.exchange": {
"total": 0.05424490600012177,
"count": 1,
"is_parallel": true,
"self": 0.05424490600012177
},
"steps_from_proto": {
"total": 0.0020042499997998675,
"count": 1,
"is_parallel": true,
"self": 0.00042477899955883913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015794710002410284,
"count": 8,
"is_parallel": true,
"self": 0.0015794710002410284
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 607.709215230982,
"count": 31525,
"is_parallel": true,
"self": 18.615341138924123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.026500515044518,
"count": 31525,
"is_parallel": true,
"self": 14.026500515044518
},
"communicator.exchange": {
"total": 517.3038788049869,
"count": 31525,
"is_parallel": true,
"self": 517.3038788049869
},
"steps_from_proto": {
"total": 57.763494772026434,
"count": 31525,
"is_parallel": true,
"self": 11.967007112074498,
"children": {
"_process_rank_one_or_two_observation": {
"total": 45.796487659951936,
"count": 252200,
"is_parallel": true,
"self": 45.796487659951936
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 378.1670705859833,
"count": 31526,
"self": 1.4379709509653367,
"children": {
"process_trajectory": {
"total": 71.53439372101889,
"count": 31526,
"self": 71.408672678019,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12572104299988496,
"count": 1,
"self": 0.12572104299988496
}
}
},
"_update_policy": {
"total": 305.1947059139991,
"count": 214,
"self": 169.87876787899813,
"children": {
"TorchPPOOptimizer.update": {
"total": 135.31593803500095,
"count": 11397,
"self": 135.31593803500095
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1349998203513678e-06,
"count": 1,
"self": 1.1349998203513678e-06
},
"TrainerController._save_models": {
"total": 0.10539796000011847,
"count": 1,
"self": 0.0014738519998900301,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10392410800022844,
"count": 1,
"self": 0.10392410800022844
}
}
}
}
}
}
}