{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4055719375610352,
"min": 1.4055719375610352,
"max": 1.4298189878463745,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69955.3125,
"min": 68587.578125,
"max": 75869.4765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.17934782608695,
"min": 82.33558178752108,
"max": 415.40495867768595,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49779.0,
"min": 48699.0,
"max": 50264.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999632.0,
"min": 49948.0,
"max": 1999632.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999632.0,
"min": 49948.0,
"max": 1999632.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.42999005317688,
"min": -0.0069221872836351395,
"max": 2.463092565536499,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1341.3544921875,
"min": -0.8306624889373779,
"max": 1439.0931396484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7269748853168627,
"min": 1.7771967062105736,
"max": 3.965164220949699,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2057.290136694908,
"min": 213.26360474526882,
"max": 2299.7952481508255,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7269748853168627,
"min": 1.7771967062105736,
"max": 3.965164220949699,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2057.290136694908,
"min": 213.26360474526882,
"max": 2299.7952481508255,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01789728931350207,
"min": 0.013708304945127262,
"max": 0.02063556438855206,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.053691867940506205,
"min": 0.027416609890254524,
"max": 0.05578911823686212,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04955334361228678,
"min": 0.023354630420605343,
"max": 0.06438844613730907,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14866003083686033,
"min": 0.04670926084121069,
"max": 0.17310052004953225,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2714489095500064e-06,
"min": 3.2714489095500064e-06,
"max": 0.00029526525157824994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.81434672865002e-06,
"min": 9.81434672865002e-06,
"max": 0.0008436693187769001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10109044999999998,
"min": 0.10109044999999998,
"max": 0.19842175,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30327134999999994,
"min": 0.2073389,
"max": 0.5812231000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.44134550000001e-05,
"min": 6.44134550000001e-05,
"max": 0.0049212453249999994,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001932403650000003,
"min": 0.0001932403650000003,
"max": 0.01406303269,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695387242",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695390068"
},
"total": 2826.463971927,
"count": 1,
"self": 0.5371980219997567,
"children": {
"run_training.setup": {
"total": 0.04849689299999227,
"count": 1,
"self": 0.04849689299999227
},
"TrainerController.start_learning": {
"total": 2825.878277012,
"count": 1,
"self": 5.366721666916874,
"children": {
"TrainerController._reset_env": {
"total": 6.000879094999959,
"count": 1,
"self": 6.000879094999959
},
"TrainerController.advance": {
"total": 2814.380497071083,
"count": 232321,
"self": 5.361617479815777,
"children": {
"env_step": {
"total": 2174.182851661197,
"count": 232321,
"self": 1845.2833811563085,
"children": {
"SubprocessEnvManager._take_step": {
"total": 325.36616242789614,
"count": 232321,
"self": 18.928581490899376,
"children": {
"TorchPolicy.evaluate": {
"total": 306.43758093699677,
"count": 222910,
"self": 306.43758093699677
}
}
},
"workers": {
"total": 3.5333080769926823,
"count": 232321,
"self": 0.0,
"children": {
"worker_root": {
"total": 2817.059577143946,
"count": 232321,
"is_parallel": true,
"self": 1307.2161388118755,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010225620000028357,
"count": 1,
"is_parallel": true,
"self": 0.0002601300000151241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007624319999877116,
"count": 2,
"is_parallel": true,
"self": 0.0007624319999877116
}
}
},
"UnityEnvironment.step": {
"total": 0.0576367639999944,
"count": 1,
"is_parallel": true,
"self": 0.0003907760000174676,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002611219999835157,
"count": 1,
"is_parallel": true,
"self": 0.0002611219999835157
},
"communicator.exchange": {
"total": 0.05618810900000426,
"count": 1,
"is_parallel": true,
"self": 0.05618810900000426
},
"steps_from_proto": {
"total": 0.0007967569999891566,
"count": 1,
"is_parallel": true,
"self": 0.0002423409999892101,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005544159999999465,
"count": 2,
"is_parallel": true,
"self": 0.0005544159999999465
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1509.8434383320707,
"count": 232320,
"is_parallel": true,
"self": 46.37204351824744,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.97691714389583,
"count": 232320,
"is_parallel": true,
"self": 89.97691714389583
},
"communicator.exchange": {
"total": 1261.178250637988,
"count": 232320,
"is_parallel": true,
"self": 1261.178250637988
},
"steps_from_proto": {
"total": 112.31622703193938,
"count": 232320,
"is_parallel": true,
"self": 39.879334302948735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.43689272899064,
"count": 464640,
"is_parallel": true,
"self": 72.43689272899064
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.8360279300703,
"count": 232321,
"self": 8.12333266705673,
"children": {
"process_trajectory": {
"total": 152.85391472901273,
"count": 232321,
"self": 151.23078405901225,
"children": {
"RLTrainer._checkpoint": {
"total": 1.623130670000478,
"count": 10,
"self": 1.623130670000478
}
}
},
"_update_policy": {
"total": 473.8587805340008,
"count": 97,
"self": 407.9624997179923,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.89628081600853,
"count": 2910,
"self": 65.89628081600853
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.770001270226203e-07,
"count": 1,
"self": 9.770001270226203e-07
},
"TrainerController._save_models": {
"total": 0.1301782020000246,
"count": 1,
"self": 0.002049456000349892,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12812874599967472,
"count": 1,
"self": 0.12812874599967472
}
}
}
}
}
}
}