{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4098488092422485,
"min": 1.4098488092422485,
"max": 1.428809642791748,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71097.265625,
"min": 69417.0078125,
"max": 77504.921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.43288084464555,
"min": 74.43288084464555,
"max": 378.1954887218045,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49349.0,
"min": 49029.0,
"max": 50300.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999955.0,
"min": 49880.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999955.0,
"min": 49880.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.49356746673584,
"min": 0.20677919685840607,
"max": 2.49356746673584,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1653.2352294921875,
"min": 27.29485321044922,
"max": 1653.2352294921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.888462038601146,
"min": 1.9557037344484618,
"max": 3.9984165013692023,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2578.05033159256,
"min": 258.15289294719696,
"max": 2578.05033159256,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.888462038601146,
"min": 1.9557037344484618,
"max": 3.9984165013692023,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2578.05033159256,
"min": 258.15289294719696,
"max": 2578.05033159256,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01744018079350806,
"min": 0.012783522446018953,
"max": 0.020250169406871365,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05232054238052418,
"min": 0.025567044892037906,
"max": 0.06075050822061409,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.059156019116441416,
"min": 0.021148620266467334,
"max": 0.06343992451826733,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17746805734932425,
"min": 0.04229724053293467,
"max": 0.18394881847004096,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6866487711500006e-06,
"min": 3.6866487711500006e-06,
"max": 0.000295291726569425,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1059946313450002e-05,
"min": 1.1059946313450002e-05,
"max": 0.0008442198185934001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122885000000002,
"min": 0.10122885000000002,
"max": 0.19843057499999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30368655000000006,
"min": 0.20759079999999996,
"max": 0.5814066,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.1319615e-05,
"min": 7.1319615e-05,
"max": 0.004921685692500002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.000213958845,
"min": 0.000213958845,
"max": 0.014072189339999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683188805",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683191267"
},
"total": 2462.101113444,
"count": 1,
"self": 0.42778671499991106,
"children": {
"run_training.setup": {
"total": 0.03883223999991969,
"count": 1,
"self": 0.03883223999991969
},
"TrainerController.start_learning": {
"total": 2461.634494489,
"count": 1,
"self": 4.6545678401512305,
"children": {
"TrainerController._reset_env": {
"total": 4.6376015580000285,
"count": 1,
"self": 4.6376015580000285
},
"TrainerController.advance": {
"total": 2452.2062860308492,
"count": 232785,
"self": 4.576237121858867,
"children": {
"env_step": {
"total": 1922.3747441629991,
"count": 232785,
"self": 1626.0695339980982,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.37260454688396,
"count": 232785,
"self": 17.467776428892762,
"children": {
"TorchPolicy.evaluate": {
"total": 275.9048281179912,
"count": 222938,
"self": 275.9048281179912
}
}
},
"workers": {
"total": 2.9326056180169644,
"count": 232785,
"self": 0.0,
"children": {
"worker_root": {
"total": 2453.478905479019,
"count": 232785,
"is_parallel": true,
"self": 1122.487418964943,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007925019999674987,
"count": 1,
"is_parallel": true,
"self": 0.00026777699986269,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005247250001048087,
"count": 2,
"is_parallel": true,
"self": 0.0005247250001048087
}
}
},
"UnityEnvironment.step": {
"total": 0.04679029900000842,
"count": 1,
"is_parallel": true,
"self": 0.00033734400005869247,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020189599990771967,
"count": 1,
"is_parallel": true,
"self": 0.00020189599990771967
},
"communicator.exchange": {
"total": 0.04550240599996869,
"count": 1,
"is_parallel": true,
"self": 0.04550240599996869
},
"steps_from_proto": {
"total": 0.0007486530000733183,
"count": 1,
"is_parallel": true,
"self": 0.0002328030000171566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005158500000561617,
"count": 2,
"is_parallel": true,
"self": 0.0005158500000561617
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1330.991486514076,
"count": 232784,
"is_parallel": true,
"self": 39.39798687906,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.54341739299639,
"count": 232784,
"is_parallel": true,
"self": 83.54341739299639
},
"communicator.exchange": {
"total": 1110.8858581890513,
"count": 232784,
"is_parallel": true,
"self": 1110.8858581890513
},
"steps_from_proto": {
"total": 97.1642240529685,
"count": 232784,
"is_parallel": true,
"self": 38.0124573190991,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.15176673386941,
"count": 465568,
"is_parallel": true,
"self": 59.15176673386941
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 525.2553047459911,
"count": 232785,
"self": 6.646005157971217,
"children": {
"process_trajectory": {
"total": 139.84165080002015,
"count": 232785,
"self": 138.36982841901954,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4718223810006066,
"count": 10,
"self": 1.4718223810006066
}
}
},
"_update_policy": {
"total": 378.76764878799975,
"count": 97,
"self": 319.9693614080021,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.798287379997646,
"count": 2910,
"self": 58.798287379997646
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0429998837935273e-06,
"count": 1,
"self": 1.0429998837935273e-06
},
"TrainerController._save_models": {
"total": 0.1360380169999189,
"count": 1,
"self": 0.003054159999919648,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13298385699999926,
"count": 1,
"self": 0.13298385699999926
}
}
}
}
}
}
}