{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5973043441772461,
"min": 0.5927567481994629,
"max": 2.8682010173797607,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6038.1494140625,
"min": 5817.75146484375,
"max": 29467.896484375,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.54208755493164,
"min": 0.4041427969932556,
"max": 13.642247200012207,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2640.70703125,
"min": 78.40370178222656,
"max": 2796.66064453125,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04510780773407911,
"min": 0.0382987928193567,
"max": 0.05707887563282081,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.18043123093631644,
"min": 0.1531951712774268,
"max": 0.262276754218874,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1775948346282045,
"min": 0.1326320384784291,
"max": 0.29224417942265674,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.710379338512818,
"min": 0.5305281539137164,
"max": 1.4612208971132836,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.594098702999996e-06,
"min": 2.594098702999996e-06,
"max": 0.000197294001353,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.0376394811999984e-05,
"min": 1.0376394811999984e-05,
"max": 0.00096172001914,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101297,
"min": 0.101297,
"max": 0.19864700000000002,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.405188,
"min": 0.405188,
"max": 0.9808600000000001,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.472029999999992e-05,
"min": 7.472029999999992e-05,
"max": 0.0049324853,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00029888119999999966,
"min": 0.00029888119999999966,
"max": 0.024044914,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.65909090909091,
"min": 3.7045454545454546,
"max": 27.177777777777777,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1173.0,
"min": 163.0,
"max": 1468.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.65909090909091,
"min": 3.7045454545454546,
"max": 27.177777777777777,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1173.0,
"min": 163.0,
"max": 1468.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681694043",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/ark/.miniconda3/envs/deep-rl/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1681694405"
},
"total": 361.47480759606697,
"count": 1,
"self": 0.16771216713823378,
"children": {
"run_training.setup": {
"total": 0.0135927899973467,
"count": 1,
"self": 0.0135927899973467
},
"TrainerController.start_learning": {
"total": 361.2935026389314,
"count": 1,
"self": 0.6753824901534244,
"children": {
"TrainerController._reset_env": {
"total": 2.9347397970268503,
"count": 1,
"self": 2.9347397970268503
},
"TrainerController.advance": {
"total": 357.558367938851,
"count": 36410,
"self": 0.28387514408677816,
"children": {
"env_step": {
"total": 357.2744927947642,
"count": 36410,
"self": 235.91425884119235,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.06870580743998,
"count": 36410,
"self": 1.541191311669536,
"children": {
"TorchPolicy.evaluate": {
"total": 119.52751449577045,
"count": 36410,
"self": 119.52751449577045
}
}
},
"workers": {
"total": 0.2915281461318955,
"count": 36410,
"self": 0.0,
"children": {
"worker_root": {
"total": 360.5419435563963,
"count": 36410,
"is_parallel": true,
"self": 181.41203785897233,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010253129294142127,
"count": 1,
"is_parallel": true,
"self": 0.00030319509096443653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007221178384497762,
"count": 10,
"is_parallel": true,
"self": 0.0007221178384497762
}
}
},
"UnityEnvironment.step": {
"total": 0.015111739048734307,
"count": 1,
"is_parallel": true,
"self": 0.00027951900847256184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020405708346515894,
"count": 1,
"is_parallel": true,
"self": 0.00020405708346515894
},
"communicator.exchange": {
"total": 0.01380195701494813,
"count": 1,
"is_parallel": true,
"self": 0.01380195701494813
},
"steps_from_proto": {
"total": 0.0008262059418484569,
"count": 1,
"is_parallel": true,
"self": 0.0001718559069558978,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000654350034892559,
"count": 10,
"is_parallel": true,
"self": 0.000654350034892559
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 179.12990569742396,
"count": 36409,
"is_parallel": true,
"self": 9.175134137971327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.8228315422311425,
"count": 36409,
"is_parallel": true,
"self": 4.8228315422311425
},
"communicator.exchange": {
"total": 138.08378690422978,
"count": 36409,
"is_parallel": true,
"self": 138.08378690422978
},
"steps_from_proto": {
"total": 27.048153112991713,
"count": 36409,
"is_parallel": true,
"self": 5.249076615087688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 21.799076497904025,
"count": 364090,
"is_parallel": true,
"self": 21.799076497904025
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001209089532494545,
"count": 1,
"self": 0.0001209089532494545,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 357.41318416793365,
"count": 257795,
"is_parallel": true,
"self": 1.9118539512855932,
"children": {
"process_trajectory": {
"total": 205.47245113260578,
"count": 257795,
"is_parallel": true,
"self": 204.02495776873548,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4474933638703078,
"count": 8,
"is_parallel": true,
"self": 1.4474933638703078
}
}
},
"_update_policy": {
"total": 150.02887908404227,
"count": 181,
"is_parallel": true,
"self": 48.422241764259525,
"children": {
"TorchPPOOptimizer.update": {
"total": 101.60663731978275,
"count": 4344,
"is_parallel": true,
"self": 101.60663731978275
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12489150394685566,
"count": 1,
"self": 0.001268954947590828,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12362254899926484,
"count": 1,
"self": 0.12362254899926484
}
}
}
}
}
}
}