{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9245128631591797,
"min": 0.910373330116272,
"max": 2.8594515323638916,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8827.2490234375,
"min": 8827.2490234375,
"max": 29283.642578125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.120320320129395,
"min": 0.41733986139297485,
"max": 13.120320320129395,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2558.46240234375,
"min": 80.96393585205078,
"max": 2657.85498046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0738347838254698,
"min": 0.05967851885946528,
"max": 0.0738347838254698,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2953391353018792,
"min": 0.2387140754378611,
"max": 0.3641555840366374,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20398687001536875,
"min": 0.12756253310534008,
"max": 0.28186374709886663,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.815947480061475,
"min": 0.5102501324213603,
"max": 1.409318735494333,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 3.75,
"max": 26.01818181818182,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 165.0,
"max": 1431.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 3.75,
"max": 26.01818181818182,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 165.0,
"max": 1431.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674991348",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674991756"
},
"total": 408.11438145399995,
"count": 1,
"self": 0.3901444219999348,
"children": {
"run_training.setup": {
"total": 0.10111920000002783,
"count": 1,
"self": 0.10111920000002783
},
"TrainerController.start_learning": {
"total": 407.623117832,
"count": 1,
"self": 0.4763156089917402,
"children": {
"TrainerController._reset_env": {
"total": 9.495524726000042,
"count": 1,
"self": 9.495524726000042
},
"TrainerController.advance": {
"total": 397.53845061900813,
"count": 18203,
"self": 0.27249582999866107,
"children": {
"env_step": {
"total": 397.26595478900947,
"count": 18203,
"self": 256.6922533030079,
"children": {
"SubprocessEnvManager._take_step": {
"total": 140.32313358400143,
"count": 18203,
"self": 1.3193259329931948,
"children": {
"TorchPolicy.evaluate": {
"total": 139.00380765100823,
"count": 18203,
"self": 30.064472051008693,
"children": {
"TorchPolicy.sample_actions": {
"total": 108.93933559999954,
"count": 18203,
"self": 108.93933559999954
}
}
}
}
},
"workers": {
"total": 0.25056790200017076,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 406.51824279499846,
"count": 18203,
"is_parallel": true,
"self": 198.5220036239915,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006369433000031677,
"count": 1,
"is_parallel": true,
"self": 0.004003128000135803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002366304999895874,
"count": 10,
"is_parallel": true,
"self": 0.002366304999895874
}
}
},
"UnityEnvironment.step": {
"total": 0.032174534999967364,
"count": 1,
"is_parallel": true,
"self": 0.00032712299991999316,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038185600004680964,
"count": 1,
"is_parallel": true,
"self": 0.00038185600004680964
},
"communicator.exchange": {
"total": 0.029998616999989736,
"count": 1,
"is_parallel": true,
"self": 0.029998616999989736
},
"steps_from_proto": {
"total": 0.0014669390000108251,
"count": 1,
"is_parallel": true,
"self": 0.00039233900002955124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010745999999812739,
"count": 10,
"is_parallel": true,
"self": 0.0010745999999812739
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 207.99623917100695,
"count": 18202,
"is_parallel": true,
"self": 7.952578044982374,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.7027605280063085,
"count": 18202,
"is_parallel": true,
"self": 4.7027605280063085
},
"communicator.exchange": {
"total": 167.19394784600655,
"count": 18202,
"is_parallel": true,
"self": 167.19394784600655
},
"steps_from_proto": {
"total": 28.146952752011714,
"count": 18202,
"is_parallel": true,
"self": 5.937526614019021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.209426137992693,
"count": 182020,
"is_parallel": true,
"self": 22.209426137992693
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.7286000065250846e-05,
"count": 1,
"self": 3.7286000065250846e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 394.87759988998357,
"count": 323398,
"is_parallel": true,
"self": 8.2864439499632,
"children": {
"process_trajectory": {
"total": 226.6280251670206,
"count": 323398,
"is_parallel": true,
"self": 225.86242896202054,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7655962050000653,
"count": 4,
"is_parallel": true,
"self": 0.7655962050000653
}
}
},
"_update_policy": {
"total": 159.96313077299976,
"count": 90,
"is_parallel": true,
"self": 40.029927494005165,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.9332032789946,
"count": 4587,
"is_parallel": true,
"self": 119.9332032789946
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11278959200001282,
"count": 1,
"self": 0.0008768299999246665,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11191276200008815,
"count": 1,
"self": 0.11191276200008815
}
}
}
}
}
}
}