{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4050829410552979,
"min": 1.4050829410552979,
"max": 1.4312736988067627,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71182.90625,
"min": 68112.046875,
"max": 76775.1640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.18233082706767,
"min": 85.58854166666667,
"max": 376.9248120300752,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50105.0,
"min": 48765.0,
"max": 50235.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999893.0,
"min": 49658.0,
"max": 1999893.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999893.0,
"min": 49658.0,
"max": 1999893.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.429332733154297,
"min": 0.05440015718340874,
"max": 2.463397979736328,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1292.405029296875,
"min": 7.180820941925049,
"max": 1388.13427734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.734066773177986,
"min": 1.7610503062605858,
"max": 3.869477611679568,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1986.5235233306885,
"min": 232.45864042639732,
"max": 2177.3976811766624,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.734066773177986,
"min": 1.7610503062605858,
"max": 3.869477611679568,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1986.5235233306885,
"min": 232.45864042639732,
"max": 2177.3976811766624,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016690605871311465,
"min": 0.014651255109735455,
"max": 0.020150889626852908,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03338121174262293,
"min": 0.02930251021947091,
"max": 0.060452668880558724,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0495061568915844,
"min": 0.022033094459523755,
"max": 0.06073451745841238,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.0990123137831688,
"min": 0.04406618891904751,
"max": 0.18220355237523714,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.539173486975003e-06,
"min": 4.539173486975003e-06,
"max": 0.00029530620156460003,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.078346973950006e-06,
"min": 9.078346973950006e-06,
"max": 0.0008436277687907501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101513025,
"min": 0.101513025,
"max": 0.19843539999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20302605,
"min": 0.20302605,
"max": 0.5812092499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.549994750000007e-05,
"min": 8.549994750000007e-05,
"max": 0.00492192646,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017099989500000013,
"min": 0.00017099989500000013,
"max": 0.014062341575000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710927177",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710929663"
},
"total": 2486.316922448,
"count": 1,
"self": 0.44050567299927934,
"children": {
"run_training.setup": {
"total": 0.0635686440000427,
"count": 1,
"self": 0.0635686440000427
},
"TrainerController.start_learning": {
"total": 2485.8128481310005,
"count": 1,
"self": 4.568259591011611,
"children": {
"TrainerController._reset_env": {
"total": 3.4952438779999966,
"count": 1,
"self": 3.4952438779999966
},
"TrainerController.advance": {
"total": 2477.6336126149886,
"count": 231969,
"self": 4.765573895985199,
"children": {
"env_step": {
"total": 2003.5869183740087,
"count": 231969,
"self": 1657.8773097730505,
"children": {
"SubprocessEnvManager._take_step": {
"total": 342.6633224139106,
"count": 231969,
"self": 17.2467967409101,
"children": {
"TorchPolicy.evaluate": {
"total": 325.4165256730005,
"count": 223020,
"self": 325.4165256730005
}
}
},
"workers": {
"total": 3.0462861870477127,
"count": 231969,
"self": 0.0,
"children": {
"worker_root": {
"total": 2477.89406763205,
"count": 231969,
"is_parallel": true,
"self": 1141.4140979640417,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011003810000147496,
"count": 1,
"is_parallel": true,
"self": 0.0002801790000148685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008202019999998811,
"count": 2,
"is_parallel": true,
"self": 0.0008202019999998811
}
}
},
"UnityEnvironment.step": {
"total": 0.03865531300004932,
"count": 1,
"is_parallel": true,
"self": 0.000389257999813708,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021474800007581507,
"count": 1,
"is_parallel": true,
"self": 0.00021474800007581507
},
"communicator.exchange": {
"total": 0.037239209000063056,
"count": 1,
"is_parallel": true,
"self": 0.037239209000063056
},
"steps_from_proto": {
"total": 0.0008120980000967393,
"count": 1,
"is_parallel": true,
"self": 0.00022547600008238078,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005866220000143585,
"count": 2,
"is_parallel": true,
"self": 0.0005866220000143585
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1336.4799696680082,
"count": 231968,
"is_parallel": true,
"self": 40.423128131077874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.60359302202585,
"count": 231968,
"is_parallel": true,
"self": 87.60359302202585
},
"communicator.exchange": {
"total": 1112.5226101899539,
"count": 231968,
"is_parallel": true,
"self": 1112.5226101899539
},
"steps_from_proto": {
"total": 95.93063832495056,
"count": 231968,
"is_parallel": true,
"self": 36.28408901206035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.646549312890215,
"count": 463936,
"is_parallel": true,
"self": 59.646549312890215
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 469.2811203449944,
"count": 231969,
"self": 7.0059120838178615,
"children": {
"process_trajectory": {
"total": 160.52367191917608,
"count": 231969,
"self": 159.04552774817637,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4781441709997125,
"count": 10,
"self": 1.4781441709997125
}
}
},
"_update_policy": {
"total": 301.75153634200046,
"count": 96,
"self": 239.69841476,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.05312158200047,
"count": 2880,
"self": 62.05312158200047
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.270002010453027e-07,
"count": 1,
"self": 9.270002010453027e-07
},
"TrainerController._save_models": {
"total": 0.115731119999964,
"count": 1,
"self": 0.001910612999836303,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11382050700012769,
"count": 1,
"self": 0.11382050700012769
}
}
}
}
}
}
}