{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4060269594192505,
"min": 1.4060269594192505,
"max": 1.4277915954589844,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70216.984375,
"min": 69001.7578125,
"max": 77287.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.25285481239804,
"min": 73.62630792227205,
"max": 385.8,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49195.0,
"min": 49195.0,
"max": 50154.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999982.0,
"min": 49777.0,
"max": 1999982.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999982.0,
"min": 49777.0,
"max": 1999982.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.48728346824646,
"min": 0.05978509783744812,
"max": 2.4928228855133057,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1527.1920166015625,
"min": 7.712277412414551,
"max": 1647.25244140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.975908906723855,
"min": 1.8359847242055938,
"max": 4.004988871526494,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2441.208068728447,
"min": 236.8420294225216,
"max": 2596.220712721348,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.975908906723855,
"min": 1.8359847242055938,
"max": 4.004988871526494,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2441.208068728447,
"min": 236.8420294225216,
"max": 2596.220712721348,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019467238628486584,
"min": 0.013400655822988484,
"max": 0.019467238628486584,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.058401715885459754,
"min": 0.026801311645976968,
"max": 0.058401715885459754,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05825640165971385,
"min": 0.02434697070469459,
"max": 0.0641153351093332,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17476920497914156,
"min": 0.04869394140938918,
"max": 0.19225430066386856,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.744098752000003e-06,
"min": 3.744098752000003e-06,
"max": 0.0002952716265761251,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.123229625600001e-05,
"min": 1.123229625600001e-05,
"max": 0.0008438893687035499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101248,
"min": 0.101248,
"max": 0.19842387500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303744,
"min": 0.20762059999999996,
"max": 0.5812964500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.227520000000008e-05,
"min": 7.227520000000008e-05,
"max": 0.0049213513625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021682560000000023,
"min": 0.00021682560000000023,
"max": 0.014066692854999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679178087",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679180394"
},
"total": 2306.992049067,
"count": 1,
"self": 0.43779158699999243,
"children": {
"run_training.setup": {
"total": 0.11513176599999042,
"count": 1,
"self": 0.11513176599999042
},
"TrainerController.start_learning": {
"total": 2306.439125714,
"count": 1,
"self": 4.110508128003858,
"children": {
"TrainerController._reset_env": {
"total": 8.040625581999961,
"count": 1,
"self": 8.040625581999961
},
"TrainerController.advance": {
"total": 2294.176912418996,
"count": 232865,
"self": 4.701575630801926,
"children": {
"env_step": {
"total": 1773.0940061500621,
"count": 232865,
"self": 1491.846356814146,
"children": {
"SubprocessEnvManager._take_step": {
"total": 278.4574406600002,
"count": 232865,
"self": 17.023534738963917,
"children": {
"TorchPolicy.evaluate": {
"total": 261.4339059210363,
"count": 222906,
"self": 261.4339059210363
}
}
},
"workers": {
"total": 2.7902086759160056,
"count": 232865,
"self": 0.0,
"children": {
"worker_root": {
"total": 2298.7045409330044,
"count": 232865,
"is_parallel": true,
"self": 1087.7984749579673,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009847549999904004,
"count": 1,
"is_parallel": true,
"self": 0.00025298399998519017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007317710000052102,
"count": 2,
"is_parallel": true,
"self": 0.0007317710000052102
}
}
},
"UnityEnvironment.step": {
"total": 0.02792700599991349,
"count": 1,
"is_parallel": true,
"self": 0.0002832509999279864,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019538499998361658,
"count": 1,
"is_parallel": true,
"self": 0.00019538499998361658
},
"communicator.exchange": {
"total": 0.02677439299998241,
"count": 1,
"is_parallel": true,
"self": 0.02677439299998241
},
"steps_from_proto": {
"total": 0.0006739770000194767,
"count": 1,
"is_parallel": true,
"self": 0.00020406199996614305,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046991500005333364,
"count": 2,
"is_parallel": true,
"self": 0.00046991500005333364
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1210.906065975037,
"count": 232864,
"is_parallel": true,
"self": 37.46407777830268,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.61056419089766,
"count": 232864,
"is_parallel": true,
"self": 75.61056419089766
},
"communicator.exchange": {
"total": 1009.8877585299497,
"count": 232864,
"is_parallel": true,
"self": 1009.8877585299497
},
"steps_from_proto": {
"total": 87.9436654758872,
"count": 232864,
"is_parallel": true,
"self": 33.40155510289287,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.54211037299433,
"count": 465728,
"is_parallel": true,
"self": 54.54211037299433
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 516.3813306381319,
"count": 232865,
"self": 6.759800092170963,
"children": {
"process_trajectory": {
"total": 142.06704447095944,
"count": 232865,
"self": 140.78986746095916,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2771770100002868,
"count": 10,
"self": 1.2771770100002868
}
}
},
"_update_policy": {
"total": 367.5544860750015,
"count": 97,
"self": 309.1890132440145,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.36547283098696,
"count": 2910,
"self": 58.36547283098696
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.920001164369751e-07,
"count": 1,
"self": 8.920001164369751e-07
},
"TrainerController._save_models": {
"total": 0.1110786930003087,
"count": 1,
"self": 0.0020294040004955605,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10904928899981314,
"count": 1,
"self": 0.10904928899981314
}
}
}
}
}
}
}