{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9640218019485474,
"min": 0.9640218019485474,
"max": 2.869413137435913,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9236.29296875,
"min": 9236.29296875,
"max": 29354.095703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.804028511047363,
"min": 0.3183388113975525,
"max": 12.804028511047363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2496.78564453125,
"min": 61.757728576660156,
"max": 2567.095703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06865079957731045,
"min": 0.061432326079881805,
"max": 0.0788049022734163,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2746031983092418,
"min": 0.24572930431952722,
"max": 0.36457991496860687,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1927890313022277,
"min": 0.12226644568327888,
"max": 0.2746015622043142,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7711561252089107,
"min": 0.4890657827331155,
"max": 1.246996290141753,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.431818181818183,
"min": 3.25,
"max": 25.431818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1119.0,
"min": 143.0,
"max": 1374.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.431818181818183,
"min": 3.25,
"max": 25.431818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1119.0,
"min": 143.0,
"max": 1374.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697095678",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697096199"
},
"total": 521.1901375709999,
"count": 1,
"self": 0.7936796589997357,
"children": {
"run_training.setup": {
"total": 0.05145586900016497,
"count": 1,
"self": 0.05145586900016497
},
"TrainerController.start_learning": {
"total": 520.345002043,
"count": 1,
"self": 0.6699738859852005,
"children": {
"TrainerController._reset_env": {
"total": 7.744415398000001,
"count": 1,
"self": 7.744415398000001
},
"TrainerController.advance": {
"total": 511.75548740701515,
"count": 18212,
"self": 0.32411910408472977,
"children": {
"env_step": {
"total": 511.4313683029304,
"count": 18212,
"self": 351.57230312988986,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.52043451201007,
"count": 18212,
"self": 1.6282649960267008,
"children": {
"TorchPolicy.evaluate": {
"total": 157.89216951598337,
"count": 18212,
"self": 157.89216951598337
}
}
},
"workers": {
"total": 0.3386306610304928,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 518.6526568689787,
"count": 18212,
"is_parallel": true,
"self": 246.99266453196242,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005688622999969084,
"count": 1,
"is_parallel": true,
"self": 0.004150432999722398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015381900002466864,
"count": 10,
"is_parallel": true,
"self": 0.0015381900002466864
}
}
},
"UnityEnvironment.step": {
"total": 0.06665733999989243,
"count": 1,
"is_parallel": true,
"self": 0.0008160580000549089,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005000360001758963,
"count": 1,
"is_parallel": true,
"self": 0.0005000360001758963
},
"communicator.exchange": {
"total": 0.06263203599974076,
"count": 1,
"is_parallel": true,
"self": 0.06263203599974076
},
"steps_from_proto": {
"total": 0.0027092099999208585,
"count": 1,
"is_parallel": true,
"self": 0.0006788720002077753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020303379997130833,
"count": 10,
"is_parallel": true,
"self": 0.0020303379997130833
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 271.6599923370163,
"count": 18211,
"is_parallel": true,
"self": 11.555879500043375,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.097999445963978,
"count": 18211,
"is_parallel": true,
"self": 6.097999445963978
},
"communicator.exchange": {
"total": 214.62000875897502,
"count": 18211,
"is_parallel": true,
"self": 214.62000875897502
},
"steps_from_proto": {
"total": 39.38610463203395,
"count": 18211,
"is_parallel": true,
"self": 7.726232506021461,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.659872126012488,
"count": 182110,
"is_parallel": true,
"self": 31.659872126012488
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015497399999730987,
"count": 1,
"self": 0.00015497399999730987,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 507.49239024904864,
"count": 526903,
"is_parallel": true,
"self": 11.439479294768716,
"children": {
"process_trajectory": {
"total": 288.1240477372821,
"count": 526903,
"is_parallel": true,
"self": 286.5670703632818,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5569773740003257,
"count": 4,
"is_parallel": true,
"self": 1.5569773740003257
}
}
},
"_update_policy": {
"total": 207.9288632169978,
"count": 90,
"is_parallel": true,
"self": 63.770783073009625,
"children": {
"TorchPPOOptimizer.update": {
"total": 144.15808014398817,
"count": 4584,
"is_parallel": true,
"self": 144.15808014398817
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.17497037799967075,
"count": 1,
"self": 0.0027256809999016696,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17224469699976908,
"count": 1,
"self": 0.17224469699976908
}
}
}
}
}
}
}