{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2647666931152344,
"min": 1.2647666931152344,
"max": 2.863877534866333,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13022.0380859375,
"min": 12495.392578125,
"max": 29234.4609375,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.736746788024902,
"min": 0.35800108313560486,
"max": 10.736746788024902,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2190.29638671875,
"min": 69.45220947265625,
"max": 2190.29638671875,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07238719422249215,
"min": 0.060196596117463086,
"max": 0.07477123151058514,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3619359711124608,
"min": 0.24078638446985234,
"max": 0.3619359711124608,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24836751298577178,
"min": 0.0922892786626357,
"max": 0.30543570651435387,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.2418375649288589,
"min": 0.3691571146505428,
"max": 1.4984143718200573,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.672727272727272,
"min": 2.840909090909091,
"max": 22.672727272727272,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1247.0,
"min": 125.0,
"max": 1247.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.672727272727272,
"min": 2.840909090909091,
"max": 22.672727272727272,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1247.0,
"min": 125.0,
"max": 1247.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745423387",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1745423595"
},
"total": 207.92418968399988,
"count": 1,
"self": 0.5419117169997207,
"children": {
"run_training.setup": {
"total": 0.022856881000052454,
"count": 1,
"self": 0.022856881000052454
},
"TrainerController.start_learning": {
"total": 207.3594210860001,
"count": 1,
"self": 0.1660783539844033,
"children": {
"TrainerController._reset_env": {
"total": 3.495977369000002,
"count": 1,
"self": 3.495977369000002
},
"TrainerController.advance": {
"total": 203.61584394401575,
"count": 9128,
"self": 0.17079442502836173,
"children": {
"env_step": {
"total": 143.59543823099943,
"count": 9128,
"self": 109.52477209898757,
"children": {
"SubprocessEnvManager._take_step": {
"total": 33.97441867200018,
"count": 9128,
"self": 0.6137675400059379,
"children": {
"TorchPolicy.evaluate": {
"total": 33.360651131994246,
"count": 9128,
"self": 33.360651131994246
}
}
},
"workers": {
"total": 0.09624746001168205,
"count": 9128,
"self": 0.0,
"children": {
"worker_root": {
"total": 206.55996655101035,
"count": 9128,
"is_parallel": true,
"self": 110.437781060004,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005887358000109089,
"count": 1,
"is_parallel": true,
"self": 0.00436246399999618,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015248940001129085,
"count": 10,
"is_parallel": true,
"self": 0.0015248940001129085
}
}
},
"UnityEnvironment.step": {
"total": 0.08112759899995581,
"count": 1,
"is_parallel": true,
"self": 0.0005576129999553814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037815900009263714,
"count": 1,
"is_parallel": true,
"self": 0.00037815900009263714
},
"communicator.exchange": {
"total": 0.07419107600003372,
"count": 1,
"is_parallel": true,
"self": 0.07419107600003372
},
"steps_from_proto": {
"total": 0.006000750999874072,
"count": 1,
"is_parallel": true,
"self": 0.00037654099924111506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0056242100006329565,
"count": 10,
"is_parallel": true,
"self": 0.0056242100006329565
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 96.12218549100635,
"count": 9127,
"is_parallel": true,
"self": 4.68194382100387,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.7253682190116706,
"count": 9127,
"is_parallel": true,
"self": 2.7253682190116706
},
"communicator.exchange": {
"total": 73.52870396099252,
"count": 9127,
"is_parallel": true,
"self": 73.52870396099252
},
"steps_from_proto": {
"total": 15.186169489998292,
"count": 9127,
"is_parallel": true,
"self": 2.670813903988119,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.515355586010173,
"count": 91270,
"is_parallel": true,
"self": 12.515355586010173
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 59.849611287987955,
"count": 9128,
"self": 0.196823435993565,
"children": {
"process_trajectory": {
"total": 13.426351267993596,
"count": 9128,
"self": 13.196248684993407,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23010258300018904,
"count": 2,
"self": 0.23010258300018904
}
}
},
"_update_policy": {
"total": 46.226436584000794,
"count": 45,
"self": 18.87292744199749,
"children": {
"TorchPPOOptimizer.update": {
"total": 27.353509142003304,
"count": 2292,
"self": 27.353509142003304
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.170000794256339e-07,
"count": 1,
"self": 9.170000794256339e-07
},
"TrainerController._save_models": {
"total": 0.0815205019998757,
"count": 1,
"self": 0.0008095869998214766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08071091500005423,
"count": 1,
"self": 0.08071091500005423
}
}
}
}
}
}
}