{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9720405340194702,
"min": 0.9395491480827332,
"max": 2.8710896968841553,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9291.7353515625,
"min": 9291.7353515625,
"max": 29402.830078125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.996184349060059,
"min": 0.3563234806060791,
"max": 12.996184349060059,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2534.255859375,
"min": 69.12675476074219,
"max": 2635.646240234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0764249693859122,
"min": 0.0597350064909002,
"max": 0.0764249693859122,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3056998775436488,
"min": 0.2389400259636008,
"max": 0.37889095313371135,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20040471815303262,
"min": 0.09967196709932943,
"max": 0.2821638917221742,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8016188726121305,
"min": 0.39868786839731774,
"max": 1.410819458610871,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.886363636363637,
"min": 2.840909090909091,
"max": 25.945454545454545,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1095.0,
"min": 125.0,
"max": 1427.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.886363636363637,
"min": 2.840909090909091,
"max": 25.945454545454545,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1095.0,
"min": 125.0,
"max": 1427.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677660764",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677661251"
},
"total": 487.0033276429999,
"count": 1,
"self": 0.44096165100000917,
"children": {
"run_training.setup": {
"total": 0.12016855999996778,
"count": 1,
"self": 0.12016855999996778
},
"TrainerController.start_learning": {
"total": 486.44219743199994,
"count": 1,
"self": 0.6026620150071835,
"children": {
"TrainerController._reset_env": {
"total": 10.669831644999988,
"count": 1,
"self": 10.669831644999988
},
"TrainerController.advance": {
"total": 475.04947278399277,
"count": 18203,
"self": 0.2926843860043391,
"children": {
"env_step": {
"total": 474.75678839798843,
"count": 18203,
"self": 327.0080900459791,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.45213378400206,
"count": 18203,
"self": 1.740901123010815,
"children": {
"TorchPolicy.evaluate": {
"total": 145.71123266099124,
"count": 18203,
"self": 31.83229217399179,
"children": {
"TorchPolicy.sample_actions": {
"total": 113.87894048699945,
"count": 18203,
"self": 113.87894048699945
}
}
}
}
},
"workers": {
"total": 0.2965645680072839,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 484.78188452599494,
"count": 18203,
"is_parallel": true,
"self": 235.36593480899722,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008082075999993776,
"count": 1,
"is_parallel": true,
"self": 0.00410171800012904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0039803579998647365,
"count": 10,
"is_parallel": true,
"self": 0.0039803579998647365
}
}
},
"UnityEnvironment.step": {
"total": 0.034782475999975304,
"count": 1,
"is_parallel": true,
"self": 0.0003852659999665775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036965900000041074,
"count": 1,
"is_parallel": true,
"self": 0.00036965900000041074
},
"communicator.exchange": {
"total": 0.03222047700000985,
"count": 1,
"is_parallel": true,
"self": 0.03222047700000985
},
"steps_from_proto": {
"total": 0.0018070739999984653,
"count": 1,
"is_parallel": true,
"self": 0.00044168799996668895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013653860000317763,
"count": 10,
"is_parallel": true,
"self": 0.0013653860000317763
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 249.41594971699772,
"count": 18202,
"is_parallel": true,
"self": 9.807816266004693,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.524471646995835,
"count": 18202,
"is_parallel": true,
"self": 5.524471646995835
},
"communicator.exchange": {
"total": 200.66517420499326,
"count": 18202,
"is_parallel": true,
"self": 200.66517420499326
},
"steps_from_proto": {
"total": 33.418487599003925,
"count": 18202,
"is_parallel": true,
"self": 7.594726261986182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.823761337017743,
"count": 182020,
"is_parallel": true,
"self": 25.823761337017743
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016011299999263429,
"count": 1,
"self": 0.00016011299999263429,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 471.57311987101286,
"count": 420145,
"is_parallel": true,
"self": 10.872734307994563,
"children": {
"process_trajectory": {
"total": 269.46519762801825,
"count": 420145,
"is_parallel": true,
"self": 268.3843790760182,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0808185520000393,
"count": 4,
"is_parallel": true,
"self": 1.0808185520000393
}
}
},
"_update_policy": {
"total": 191.23518793500006,
"count": 90,
"is_parallel": true,
"self": 66.33391956400214,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.90126837099791,
"count": 4587,
"is_parallel": true,
"self": 124.90126837099791
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12007087500001035,
"count": 1,
"self": 0.0008945130000483914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11917636199996196,
"count": 1,
"self": 0.11917636199996196
}
}
}
}
}
}
}