{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4124481678009033,
"min": 1.4124481678009033,
"max": 1.432417631149292,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69623.8046875,
"min": 67400.296875,
"max": 76730.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.4043321299639,
"min": 80.16207455429497,
"max": 401.064,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49530.0,
"min": 49001.0,
"max": 50133.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999994.0,
"min": 49969.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999994.0,
"min": 49969.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.488279342651367,
"min": 0.162667915225029,
"max": 2.488279342651367,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1378.5067138671875,
"min": 20.170822143554688,
"max": 1464.669921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.939544552284888,
"min": 1.8519001903553163,
"max": 3.939544552284888,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2182.507681965828,
"min": 229.63562360405922,
"max": 2279.7741298675537,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.939544552284888,
"min": 1.8519001903553163,
"max": 3.939544552284888,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2182.507681965828,
"min": 229.63562360405922,
"max": 2279.7741298675537,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01719893228219007,
"min": 0.013665210593414183,
"max": 0.021129726972624968,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05159679684657021,
"min": 0.027700417781791963,
"max": 0.05470786787530718,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05756763372984197,
"min": 0.021306627057492734,
"max": 0.05936220983664195,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17270290118952591,
"min": 0.04261325411498547,
"max": 0.17808662950992585,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3273488909166713e-06,
"min": 3.3273488909166713e-06,
"max": 0.000295327276557575,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.982046672750014e-06,
"min": 9.982046672750014e-06,
"max": 0.0008438364187211998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110908333333335,
"min": 0.10110908333333335,
"max": 0.19844242499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30332725000000005,
"min": 0.20738075,
"max": 0.5812788,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.53432583333334e-05,
"min": 6.53432583333334e-05,
"max": 0.004922277007499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019602977500000023,
"min": 0.00019602977500000023,
"max": 0.01406581212,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671114974",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671117121"
},
"total": 2146.338780379,
"count": 1,
"self": 0.44991176599978644,
"children": {
"run_training.setup": {
"total": 0.10492129499994007,
"count": 1,
"self": 0.10492129499994007
},
"TrainerController.start_learning": {
"total": 2145.783947318,
"count": 1,
"self": 3.547047028951056,
"children": {
"TrainerController._reset_env": {
"total": 9.833553510000002,
"count": 1,
"self": 9.833553510000002
},
"TrainerController.advance": {
"total": 2132.287943966049,
"count": 231572,
"self": 3.8047097041312554,
"children": {
"env_step": {
"total": 1664.588859509918,
"count": 231572,
"self": 1399.369346649948,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.7614915139137,
"count": 231572,
"self": 13.958595119969004,
"children": {
"TorchPolicy.evaluate": {
"total": 248.8028963939447,
"count": 222890,
"self": 62.42961748401183,
"children": {
"TorchPolicy.sample_actions": {
"total": 186.37327890993288,
"count": 222890,
"self": 186.37327890993288
}
}
}
}
},
"workers": {
"total": 2.4580213460562845,
"count": 231572,
"self": 0.0,
"children": {
"worker_root": {
"total": 2138.3051650090247,
"count": 231572,
"is_parallel": true,
"self": 988.1054924620034,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006216375999997581,
"count": 1,
"is_parallel": true,
"self": 0.0003436470001361158,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005872728999861465,
"count": 2,
"is_parallel": true,
"self": 0.005872728999861465
}
}
},
"UnityEnvironment.step": {
"total": 0.026779017999956523,
"count": 1,
"is_parallel": true,
"self": 0.00027728299994578265,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017859499996575323,
"count": 1,
"is_parallel": true,
"self": 0.00017859499996575323
},
"communicator.exchange": {
"total": 0.02560856299999159,
"count": 1,
"is_parallel": true,
"self": 0.02560856299999159
},
"steps_from_proto": {
"total": 0.0007145770000533958,
"count": 1,
"is_parallel": true,
"self": 0.00023601700013387017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004785599999195256,
"count": 2,
"is_parallel": true,
"self": 0.0004785599999195256
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1150.1996725470212,
"count": 231571,
"is_parallel": true,
"self": 33.89846268816177,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.03968718100953,
"count": 231571,
"is_parallel": true,
"self": 74.03968718100953
},
"communicator.exchange": {
"total": 952.0682062849356,
"count": 231571,
"is_parallel": true,
"self": 952.0682062849356
},
"steps_from_proto": {
"total": 90.19331639291431,
"count": 231571,
"is_parallel": true,
"self": 37.246456146761034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.94686024615328,
"count": 463142,
"is_parallel": true,
"self": 52.94686024615328
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.89437475199986,
"count": 231572,
"self": 5.800120952989914,
"children": {
"process_trajectory": {
"total": 139.50029610301033,
"count": 231572,
"self": 138.3462213590102,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1540747440001269,
"count": 10,
"self": 1.1540747440001269
}
}
},
"_update_policy": {
"total": 318.5939576959996,
"count": 97,
"self": 265.6822459980067,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.91171169799293,
"count": 2910,
"self": 52.91171169799293
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.450000106880907e-07,
"count": 1,
"self": 9.450000106880907e-07
},
"TrainerController._save_models": {
"total": 0.11540186799993535,
"count": 1,
"self": 0.0019282379998912802,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11347363000004407,
"count": 1,
"self": 0.11347363000004407
}
}
}
}
}
}
}