{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0071253776550293,
"min": 1.0071253776550293,
"max": 2.8567819595336914,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9616.033203125,
"min": 9616.033203125,
"max": 29287.728515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.273775100708008,
"min": 0.3510074317455292,
"max": 12.273775100708008,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2393.38623046875,
"min": 68.09544372558594,
"max": 2444.73681640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06672713810614511,
"min": 0.06228393689054749,
"max": 0.07469709612524086,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26690855242458045,
"min": 0.2604283546782373,
"max": 0.3627195860350255,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2280574045052715,
"min": 0.12686488323721706,
"max": 0.26481765397623475,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.912229618021086,
"min": 0.5074595329488683,
"max": 1.3240882698811738,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.568181818181817,
"min": 3.659090909090909,
"max": 24.568181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1081.0,
"min": 161.0,
"max": 1329.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.568181818181817,
"min": 3.659090909090909,
"max": 24.568181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1081.0,
"min": 161.0,
"max": 1329.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698353690",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698354167"
},
"total": 476.83103462400004,
"count": 1,
"self": 0.48679543500009004,
"children": {
"run_training.setup": {
"total": 0.04416528199999448,
"count": 1,
"self": 0.04416528199999448
},
"TrainerController.start_learning": {
"total": 476.30007390699996,
"count": 1,
"self": 0.5709882100012464,
"children": {
"TrainerController._reset_env": {
"total": 8.488065153999969,
"count": 1,
"self": 8.488065153999969
},
"TrainerController.advance": {
"total": 467.1488819999988,
"count": 18199,
"self": 0.2867436110032031,
"children": {
"env_step": {
"total": 466.8621383889956,
"count": 18199,
"self": 321.31379386898584,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.2593162090037,
"count": 18199,
"self": 1.4774413309943384,
"children": {
"TorchPolicy.evaluate": {
"total": 143.78187487800938,
"count": 18199,
"self": 143.78187487800938
}
}
},
"workers": {
"total": 0.28902831100606363,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 474.9625781350031,
"count": 18199,
"is_parallel": true,
"self": 233.45926926100083,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006212681999954839,
"count": 1,
"is_parallel": true,
"self": 0.004732071999796972,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001480610000157867,
"count": 10,
"is_parallel": true,
"self": 0.001480610000157867
}
}
},
"UnityEnvironment.step": {
"total": 0.04675541799997518,
"count": 1,
"is_parallel": true,
"self": 0.0006641449999733595,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003038650000917187,
"count": 1,
"is_parallel": true,
"self": 0.0003038650000917187
},
"communicator.exchange": {
"total": 0.04386866499999087,
"count": 1,
"is_parallel": true,
"self": 0.04386866499999087
},
"steps_from_proto": {
"total": 0.0019187429999192318,
"count": 1,
"is_parallel": true,
"self": 0.00036318199977358745,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015555610001456444,
"count": 10,
"is_parallel": true,
"self": 0.0015555610001456444
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 241.5033088740023,
"count": 18198,
"is_parallel": true,
"self": 10.808184912000797,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.40296825000155,
"count": 18198,
"is_parallel": true,
"self": 5.40296825000155
},
"communicator.exchange": {
"total": 191.0699934670008,
"count": 18198,
"is_parallel": true,
"self": 191.0699934670008
},
"steps_from_proto": {
"total": 34.22216224499914,
"count": 18198,
"is_parallel": true,
"self": 6.4979095029982545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.724252742000886,
"count": 181980,
"is_parallel": true,
"self": 27.724252742000886
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011686699997426331,
"count": 1,
"self": 0.00011686699997426331,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 463.34478458796184,
"count": 481559,
"is_parallel": true,
"self": 10.192770662906014,
"children": {
"process_trajectory": {
"total": 261.2859830070556,
"count": 481559,
"is_parallel": true,
"self": 260.50867375205587,
"children": {
"RLTrainer._checkpoint": {
"total": 0.77730925499975,
"count": 4,
"is_parallel": true,
"self": 0.77730925499975
}
}
},
"_update_policy": {
"total": 191.8660309180002,
"count": 90,
"is_parallel": true,
"self": 60.47238907100109,
"children": {
"TorchPPOOptimizer.update": {
"total": 131.39364184699912,
"count": 4587,
"is_parallel": true,
"self": 131.39364184699912
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09202167599994482,
"count": 1,
"self": 0.0008797759999197297,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09114190000002509,
"count": 1,
"self": 0.09114190000002509
}
}
}
}
}
}
}