{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7586241960525513,
"min": 0.7586241960525513,
"max": 2.8544113636016846,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7276.72314453125,
"min": 7276.72314453125,
"max": 29552.62109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.935254096984863,
"min": 0.49685052037239075,
"max": 12.935254096984863,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1306.460693359375,
"min": 48.19449996948242,
"max": 1306.460693359375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.692307692307693,
"min": 4.545454545454546,
"max": 25.692307692307693,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1336.0,
"min": 200.0,
"max": 1368.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.692307692307693,
"min": 4.545454545454546,
"max": 25.692307692307693,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1336.0,
"min": 200.0,
"max": 1368.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06971587601374246,
"min": 0.06252595739651416,
"max": 0.07932468573163336,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27886350405496985,
"min": 0.2512164670034134,
"max": 0.38023306122825795,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21430498339674053,
"min": 0.15694370387377693,
"max": 0.28470112056124447,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8572199335869621,
"min": 0.6277748154951077,
"max": 1.4235056028062223,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.950097350000008e-06,
"min": 7.950097350000008e-06,
"max": 0.00029175000274999995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.180038940000003e-05,
"min": 3.180038940000003e-05,
"max": 0.0013845000385,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10265,
"min": 0.10265,
"max": 0.19725000000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4106,
"min": 0.4106,
"max": 0.9615,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014223500000000013,
"min": 0.00014223500000000013,
"max": 0.004862775,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005689400000000005,
"min": 0.0005689400000000005,
"max": 0.023078849999999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753713753",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget10 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753714150"
},
"total": 396.793191234,
"count": 1,
"self": 0.4292780040002526,
"children": {
"run_training.setup": {
"total": 0.03202699799999209,
"count": 1,
"self": 0.03202699799999209
},
"TrainerController.start_learning": {
"total": 396.33188623199976,
"count": 1,
"self": 0.3077648999885696,
"children": {
"TrainerController._reset_env": {
"total": 2.154582571999981,
"count": 1,
"self": 2.154582571999981
},
"TrainerController.advance": {
"total": 393.79389522701126,
"count": 18200,
"self": 0.35139653600026577,
"children": {
"env_step": {
"total": 282.4045093190284,
"count": 18200,
"self": 218.07722089307072,
"children": {
"SubprocessEnvManager._take_step": {
"total": 64.13332768598366,
"count": 18200,
"self": 1.1586363560104473,
"children": {
"TorchPolicy.evaluate": {
"total": 62.97469132997321,
"count": 18200,
"self": 62.97469132997321
}
}
},
"workers": {
"total": 0.19396073997404528,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 395.0735726729845,
"count": 18200,
"is_parallel": true,
"self": 202.94649992893642,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018547719998878165,
"count": 1,
"is_parallel": true,
"self": 0.0005407570001807471,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013140149997070694,
"count": 10,
"is_parallel": true,
"self": 0.0013140149997070694
}
}
},
"UnityEnvironment.step": {
"total": 0.03833118700003979,
"count": 1,
"is_parallel": true,
"self": 0.0007042439999622729,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003785149999657733,
"count": 1,
"is_parallel": true,
"self": 0.0003785149999657733
},
"communicator.exchange": {
"total": 0.034344431000135955,
"count": 1,
"is_parallel": true,
"self": 0.034344431000135955
},
"steps_from_proto": {
"total": 0.0029039969999757886,
"count": 1,
"is_parallel": true,
"self": 0.00045152600000619714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024524709999695915,
"count": 10,
"is_parallel": true,
"self": 0.0024524709999695915
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 192.12707274404806,
"count": 18199,
"is_parallel": true,
"self": 9.377790536154862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.07854646598571,
"count": 18199,
"is_parallel": true,
"self": 5.07854646598571
},
"communicator.exchange": {
"total": 148.15007760494404,
"count": 18199,
"is_parallel": true,
"self": 148.15007760494404
},
"steps_from_proto": {
"total": 29.520658136963448,
"count": 18199,
"is_parallel": true,
"self": 5.2074960119648495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.3131621249986,
"count": 181990,
"is_parallel": true,
"self": 24.3131621249986
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 111.03798937198258,
"count": 18200,
"self": 0.3815790689959613,
"children": {
"process_trajectory": {
"total": 20.794334337989085,
"count": 18200,
"self": 20.38428447798924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.41004985999984456,
"count": 4,
"self": 0.41004985999984456
}
}
},
"_update_policy": {
"total": 89.86207596499753,
"count": 90,
"self": 37.33359989100086,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.52847607399667,
"count": 4590,
"self": 52.52847607399667
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.139998837781604e-07,
"count": 1,
"self": 9.139998837781604e-07
},
"TrainerController._save_models": {
"total": 0.07564261900006386,
"count": 1,
"self": 0.0007836929999029962,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07485892600016086,
"count": 1,
"self": 0.07485892600016086
}
}
}
}
}
}
}