{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.413012146949768,
"min": 1.412990689277649,
"max": 1.4303752183914185,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69168.359375,
"min": 68823.671875,
"max": 76321.6953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.65708812260536,
"min": 83.57360406091371,
"max": 373.47014925373134,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49411.0,
"min": 48863.0,
"max": 50214.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49458.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49458.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3710665702819824,
"min": -0.03072829730808735,
"max": 2.439289093017578,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1237.69677734375,
"min": -4.0868635177612305,
"max": 1397.437744140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7766955862556837,
"min": 1.8246542150364782,
"max": 3.9379762530556532,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1971.435096025467,
"min": 242.6790105998516,
"max": 2233.0594363212585,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7766955862556837,
"min": 1.8246542150364782,
"max": 3.9379762530556532,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1971.435096025467,
"min": 242.6790105998516,
"max": 2233.0594363212585,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01642108028172515,
"min": 0.014492299639581083,
"max": 0.020209692497640694,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04926324084517546,
"min": 0.028984599279162165,
"max": 0.05555697321445526,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05284126587212085,
"min": 0.023419557884335515,
"max": 0.06408375687897205,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15852379761636257,
"min": 0.04683911576867103,
"max": 0.17247080753246943,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.200548933183332e-06,
"min": 3.200548933183332e-06,
"max": 0.00029538037653987505,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.601646799549995e-06,
"min": 9.601646799549995e-06,
"max": 0.0008442391685869498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106681666666666,
"min": 0.10106681666666666,
"max": 0.198460125,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30320044999999995,
"min": 0.20730205000000002,
"max": 0.5814130500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.323415166666666e-05,
"min": 6.323415166666666e-05,
"max": 0.0049231602375,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018970245499999998,
"min": 0.00018970245499999998,
"max": 0.014072511195,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706792887",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706795166"
},
"total": 2279.6840992700004,
"count": 1,
"self": 0.3956433550001748,
"children": {
"run_training.setup": {
"total": 0.05572777600036716,
"count": 1,
"self": 0.05572777600036716
},
"TrainerController.start_learning": {
"total": 2279.232728139,
"count": 1,
"self": 4.406928712916397,
"children": {
"TrainerController._reset_env": {
"total": 2.9832809889999226,
"count": 1,
"self": 2.9832809889999226
},
"TrainerController.advance": {
"total": 2271.731045180083,
"count": 231861,
"self": 4.490694665771571,
"children": {
"env_step": {
"total": 1802.0059917621975,
"count": 231861,
"self": 1488.8873068861976,
"children": {
"SubprocessEnvManager._take_step": {
"total": 310.4624894419526,
"count": 231861,
"self": 16.941473749120178,
"children": {
"TorchPolicy.evaluate": {
"total": 293.5210156928324,
"count": 222850,
"self": 293.5210156928324
}
}
},
"workers": {
"total": 2.6561954340472766,
"count": 231861,
"self": 0.0,
"children": {
"worker_root": {
"total": 2272.185640268002,
"count": 231861,
"is_parallel": true,
"self": 1062.2214287719366,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008804019998933654,
"count": 1,
"is_parallel": true,
"self": 0.0002442659997541341,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006361360001392313,
"count": 2,
"is_parallel": true,
"self": 0.0006361360001392313
}
}
},
"UnityEnvironment.step": {
"total": 0.02881730899980539,
"count": 1,
"is_parallel": true,
"self": 0.000337955999384576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022517200022775796,
"count": 1,
"is_parallel": true,
"self": 0.00022517200022775796
},
"communicator.exchange": {
"total": 0.027557505000004312,
"count": 1,
"is_parallel": true,
"self": 0.027557505000004312
},
"steps_from_proto": {
"total": 0.0006966760001887451,
"count": 1,
"is_parallel": true,
"self": 0.0001926040004036622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005040719997850829,
"count": 2,
"is_parallel": true,
"self": 0.0005040719997850829
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1209.9642114960652,
"count": 231860,
"is_parallel": true,
"self": 40.1170913499177,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.2234753169173,
"count": 231860,
"is_parallel": true,
"self": 81.2234753169173
},
"communicator.exchange": {
"total": 1000.8367559952117,
"count": 231860,
"is_parallel": true,
"self": 1000.8367559952117
},
"steps_from_proto": {
"total": 87.78688883401855,
"count": 231860,
"is_parallel": true,
"self": 30.933046540114447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.85384229390411,
"count": 463720,
"is_parallel": true,
"self": 56.85384229390411
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.2343587521141,
"count": 231861,
"self": 6.183017675009523,
"children": {
"process_trajectory": {
"total": 139.0422440451025,
"count": 231861,
"self": 137.8009936531016,
"children": {
"RLTrainer._checkpoint": {
"total": 1.241250392000893,
"count": 10,
"self": 1.241250392000893
}
}
},
"_update_policy": {
"total": 320.00909703200205,
"count": 97,
"self": 257.4708022820105,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.538294749991564,
"count": 2910,
"self": 62.538294749991564
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.730003173695877e-07,
"count": 1,
"self": 8.730003173695877e-07
},
"TrainerController._save_models": {
"total": 0.11147238400008064,
"count": 1,
"self": 0.002242098999886366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10923028500019427,
"count": 1,
"self": 0.10923028500019427
}
}
}
}
}
}
}