{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4029492139816284,
"min": 1.4029347896575928,
"max": 1.4283720254898071,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69430.5546875,
"min": 69128.3984375,
"max": 77124.5078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.69204152249135,
"min": 76.96567862714508,
"max": 382.35114503816794,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49530.0,
"min": 48830.0,
"max": 50163.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999987.0,
"min": 49933.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999987.0,
"min": 49933.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.454929828643799,
"min": 0.14523178339004517,
"max": 2.474435567855835,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1418.949462890625,
"min": 18.8801326751709,
"max": 1510.5880126953125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8383914849956144,
"min": 1.8017605586693837,
"max": 3.945117281812482,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2218.590278327465,
"min": 234.22887262701988,
"max": 2391.371473789215,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8383914849956144,
"min": 1.8017605586693837,
"max": 3.945117281812482,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2218.590278327465,
"min": 234.22887262701988,
"max": 2391.371473789215,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018534085372610004,
"min": 0.014780998124782247,
"max": 0.0203014154258805,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05560225611783001,
"min": 0.029561996249564494,
"max": 0.05621791743518163,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05369358207616542,
"min": 0.021812425305445988,
"max": 0.060067690784732494,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16108074622849625,
"min": 0.043624850610891976,
"max": 0.17663233441611131,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5362488212833267e-06,
"min": 3.5362488212833267e-06,
"max": 0.00029532225155925,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.060874646384998e-05,
"min": 1.060874646384998e-05,
"max": 0.0008437258687580498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117871666666665,
"min": 0.10117871666666665,
"max": 0.19844075000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30353614999999995,
"min": 0.20753254999999995,
"max": 0.58124195,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.881796166666658e-05,
"min": 6.881796166666658e-05,
"max": 0.004922193425,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002064538849999997,
"min": 0.0002064538849999997,
"max": 0.014063973304999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694257122",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694259626"
},
"total": 2503.450561657,
"count": 1,
"self": 0.4981367849995877,
"children": {
"run_training.setup": {
"total": 0.044617027000072085,
"count": 1,
"self": 0.044617027000072085
},
"TrainerController.start_learning": {
"total": 2502.907807845,
"count": 1,
"self": 4.752535818964134,
"children": {
"TrainerController._reset_env": {
"total": 4.737049314000046,
"count": 1,
"self": 4.737049314000046
},
"TrainerController.advance": {
"total": 2493.2939379150357,
"count": 232630,
"self": 5.08395982197726,
"children": {
"env_step": {
"total": 1919.4921043060756,
"count": 232630,
"self": 1624.5275140733156,
"children": {
"SubprocessEnvManager._take_step": {
"total": 291.9321524128949,
"count": 232630,
"self": 17.15241219098391,
"children": {
"TorchPolicy.evaluate": {
"total": 274.77974022191097,
"count": 222910,
"self": 274.77974022191097
}
}
},
"workers": {
"total": 3.0324378198650948,
"count": 232630,
"self": 0.0,
"children": {
"worker_root": {
"total": 2495.179088350899,
"count": 232630,
"is_parallel": true,
"self": 1170.6625956158948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009015650000492315,
"count": 1,
"is_parallel": true,
"self": 0.00022001800005000405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006815469999992274,
"count": 2,
"is_parallel": true,
"self": 0.0006815469999992274
}
}
},
"UnityEnvironment.step": {
"total": 0.0730927760000668,
"count": 1,
"is_parallel": true,
"self": 0.00034781700003350124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023398200005431136,
"count": 1,
"is_parallel": true,
"self": 0.00023398200005431136
},
"communicator.exchange": {
"total": 0.07169926899996426,
"count": 1,
"is_parallel": true,
"self": 0.07169926899996426
},
"steps_from_proto": {
"total": 0.0008117080000147325,
"count": 1,
"is_parallel": true,
"self": 0.00022694200004025333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005847659999744792,
"count": 2,
"is_parallel": true,
"self": 0.0005847659999744792
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1324.516492735004,
"count": 232629,
"is_parallel": true,
"self": 40.949508025100386,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.02543235493545,
"count": 232629,
"is_parallel": true,
"self": 82.02543235493545
},
"communicator.exchange": {
"total": 1100.859269154923,
"count": 232629,
"is_parallel": true,
"self": 1100.859269154923
},
"steps_from_proto": {
"total": 100.68228320004528,
"count": 232629,
"is_parallel": true,
"self": 35.430421042082344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.25186215796293,
"count": 465258,
"is_parallel": true,
"self": 65.25186215796293
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 568.7178737869829,
"count": 232630,
"self": 6.99618175494129,
"children": {
"process_trajectory": {
"total": 146.12991749504033,
"count": 232630,
"self": 144.71730423803933,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4126132570009986,
"count": 10,
"self": 1.4126132570009986
}
}
},
"_update_policy": {
"total": 415.59177453700124,
"count": 97,
"self": 354.0084867630079,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.583287773993334,
"count": 2910,
"self": 61.583287773993334
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.320001481682993e-07,
"count": 1,
"self": 9.320001481682993e-07
},
"TrainerController._save_models": {
"total": 0.12428386500005217,
"count": 1,
"self": 0.002218137999989267,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1220657270000629,
"count": 1,
"self": 0.1220657270000629
}
}
}
}
}
}
}