{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9691912531852722,
"min": 0.9691912531852722,
"max": 2.8547604084014893,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9243.1767578125,
"min": 9243.1767578125,
"max": 29235.6015625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.400653839111328,
"min": 0.39931046962738037,
"max": 13.51445198059082,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2613.12744140625,
"min": 77.46623229980469,
"max": 2756.456787109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06943172063721945,
"min": 0.06553791098376678,
"max": 0.07353814042995081,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2777268825488778,
"min": 0.2621516439350671,
"max": 0.3676907021497541,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1770546653910595,
"min": 0.12274705784682952,
"max": 0.28022431575638407,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.708218661564238,
"min": 0.49098823138731806,
"max": 1.315684741297189,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000007e-06,
"min": 8.082097306000007e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400003e-05,
"min": 3.232838922400003e-05,
"max": 0.0013851600382799997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.295454545454547,
"min": 4.0227272727272725,
"max": 26.490909090909092,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1157.0,
"min": 177.0,
"max": 1457.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.295454545454547,
"min": 4.0227272727272725,
"max": 26.490909090909092,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1157.0,
"min": 177.0,
"max": 1457.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673370163",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673370779"
},
"total": 616.9305473180002,
"count": 1,
"self": 0.38820713300015086,
"children": {
"run_training.setup": {
"total": 0.10746501300013733,
"count": 1,
"self": 0.10746501300013733
},
"TrainerController.start_learning": {
"total": 616.4348751719999,
"count": 1,
"self": 0.5078972250021252,
"children": {
"TrainerController._reset_env": {
"total": 8.517950889000076,
"count": 1,
"self": 8.517950889000076
},
"TrainerController.advance": {
"total": 607.2827824889982,
"count": 18203,
"self": 0.25616611001623824,
"children": {
"env_step": {
"total": 607.026616378982,
"count": 18203,
"self": 460.06393688195067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.69948821101843,
"count": 18203,
"self": 1.3573593030143911,
"children": {
"TorchPolicy.evaluate": {
"total": 145.34212890800404,
"count": 18203,
"self": 32.66103243599082,
"children": {
"TorchPolicy.sample_actions": {
"total": 112.68109647201322,
"count": 18203,
"self": 112.68109647201322
}
}
}
}
},
"workers": {
"total": 0.26319128601289776,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 615.1903098529897,
"count": 18203,
"is_parallel": true,
"self": 397.2321358399861,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005542058999935762,
"count": 1,
"is_parallel": true,
"self": 0.0031921669994972035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002349892000438558,
"count": 10,
"is_parallel": true,
"self": 0.002349892000438558
}
}
},
"UnityEnvironment.step": {
"total": 0.032841696999867054,
"count": 1,
"is_parallel": true,
"self": 0.0005305549998411152,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037286199994923663,
"count": 1,
"is_parallel": true,
"self": 0.00037286199994923663
},
"communicator.exchange": {
"total": 0.03012855999986641,
"count": 1,
"is_parallel": true,
"self": 0.03012855999986641
},
"steps_from_proto": {
"total": 0.0018097200002102909,
"count": 1,
"is_parallel": true,
"self": 0.0004144150002503011,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013953049999599898,
"count": 10,
"is_parallel": true,
"self": 0.0013953049999599898
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 217.9581740130036,
"count": 18202,
"is_parallel": true,
"self": 8.362239242990427,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.991336620012362,
"count": 18202,
"is_parallel": true,
"self": 4.991336620012362
},
"communicator.exchange": {
"total": 172.47982819102117,
"count": 18202,
"is_parallel": true,
"self": 172.47982819102117
},
"steps_from_proto": {
"total": 32.12476995897964,
"count": 18202,
"is_parallel": true,
"self": 6.565998277023255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.558771681956387,
"count": 182020,
"is_parallel": true,
"self": 25.558771681956387
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.52079998467525e-05,
"count": 1,
"self": 4.52079998467525e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 604.3309083509896,
"count": 331963,
"is_parallel": true,
"self": 9.038655579061924,
"children": {
"process_trajectory": {
"total": 238.02562971292696,
"count": 331963,
"is_parallel": true,
"self": 237.16832442892724,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8573052839997217,
"count": 4,
"is_parallel": true,
"self": 0.8573052839997217
}
}
},
"_update_policy": {
"total": 357.2666230590007,
"count": 90,
"is_parallel": true,
"self": 139.13507826699924,
"children": {
"TorchPPOOptimizer.update": {
"total": 218.13154479200148,
"count": 15290,
"is_parallel": true,
"self": 218.13154479200148
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12619936099963525,
"count": 1,
"self": 0.0010316429998056265,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12516771799982962,
"count": 1,
"self": 0.12516771799982962
}
}
}
}
}
}
}