{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1256626844406128,
"min": 1.1256626844406128,
"max": 2.8609366416931152,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10735.4453125,
"min": 10735.4453125,
"max": 29361.79296875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.738016128540039,
"min": 0.5861009359359741,
"max": 11.738016128540039,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2288.9130859375,
"min": 113.70358276367188,
"max": 2352.447509765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06664960866014465,
"min": 0.06136969322802079,
"max": 0.07309905315488839,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2665984346405786,
"min": 0.24547877291208317,
"max": 0.3608087172874993,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18445931152239733,
"min": 0.15272608458968426,
"max": 0.27350295098388894,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7378372460895893,
"min": 0.610904338358737,
"max": 1.3675147549194446,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.636363636363637,
"min": 4.159090909090909,
"max": 23.636363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1040.0,
"min": 183.0,
"max": 1277.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.636363636363637,
"min": 4.159090909090909,
"max": 23.636363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1040.0,
"min": 183.0,
"max": 1277.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700725942",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700726458"
},
"total": 516.403559946,
"count": 1,
"self": 0.4258133140000382,
"children": {
"run_training.setup": {
"total": 0.050145450000002256,
"count": 1,
"self": 0.050145450000002256
},
"TrainerController.start_learning": {
"total": 515.927601182,
"count": 1,
"self": 0.5901721500058557,
"children": {
"TrainerController._reset_env": {
"total": 11.068037057999959,
"count": 1,
"self": 11.068037057999959
},
"TrainerController.advance": {
"total": 504.1711175729942,
"count": 18199,
"self": 0.2825441489978857,
"children": {
"env_step": {
"total": 503.8885734239963,
"count": 18199,
"self": 347.55728937699337,
"children": {
"SubprocessEnvManager._take_step": {
"total": 155.89251201700768,
"count": 18199,
"self": 1.5406164860104354,
"children": {
"TorchPolicy.evaluate": {
"total": 154.35189553099724,
"count": 18199,
"self": 154.35189553099724
}
}
},
"workers": {
"total": 0.43877202999527753,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 514.4553518639977,
"count": 18199,
"is_parallel": true,
"self": 254.00517901500177,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006218901000011101,
"count": 1,
"is_parallel": true,
"self": 0.004604712000002564,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016141890000085368,
"count": 10,
"is_parallel": true,
"self": 0.0016141890000085368
}
}
},
"UnityEnvironment.step": {
"total": 0.05065842999999859,
"count": 1,
"is_parallel": true,
"self": 0.0007022200000506018,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004167799999663657,
"count": 1,
"is_parallel": true,
"self": 0.0004167799999663657
},
"communicator.exchange": {
"total": 0.047151954000014484,
"count": 1,
"is_parallel": true,
"self": 0.047151954000014484
},
"steps_from_proto": {
"total": 0.002387475999967137,
"count": 1,
"is_parallel": true,
"self": 0.0005709429999001259,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001816533000067011,
"count": 10,
"is_parallel": true,
"self": 0.001816533000067011
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 260.450172848996,
"count": 18198,
"is_parallel": true,
"self": 11.66819776098987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.757660393004812,
"count": 18198,
"is_parallel": true,
"self": 5.757660393004812
},
"communicator.exchange": {
"total": 205.90590315700217,
"count": 18198,
"is_parallel": true,
"self": 205.90590315700217
},
"steps_from_proto": {
"total": 37.11841153799912,
"count": 18198,
"is_parallel": true,
"self": 7.149924786015902,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.96848675198322,
"count": 181980,
"is_parallel": true,
"self": 29.96848675198322
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012808500002847722,
"count": 1,
"self": 0.00012808500002847722,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 499.68063952697827,
"count": 519048,
"is_parallel": true,
"self": 11.550333757943577,
"children": {
"process_trajectory": {
"total": 279.55057404903476,
"count": 519048,
"is_parallel": true,
"self": 278.9821034930349,
"children": {
"RLTrainer._checkpoint": {
"total": 0.568470555999852,
"count": 4,
"is_parallel": true,
"self": 0.568470555999852
}
}
},
"_update_policy": {
"total": 208.57973171999993,
"count": 90,
"is_parallel": true,
"self": 71.87472291299883,
"children": {
"TorchPPOOptimizer.update": {
"total": 136.7050088070011,
"count": 4587,
"is_parallel": true,
"self": 136.7050088070011
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09814631599999757,
"count": 1,
"self": 0.0009594590000006065,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09718685699999696,
"count": 1,
"self": 0.09718685699999696
}
}
}
}
}
}
}