{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405860424041748,
"min": 1.4058581590652466,
"max": 1.42558753490448,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69459.34375,
"min": 69327.28125,
"max": 76990.84375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.45742904841403,
"min": 76.26584234930448,
"max": 400.1031746031746,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49392.0,
"min": 48934.0,
"max": 50413.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999953.0,
"min": 49878.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999953.0,
"min": 49878.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4598076343536377,
"min": 0.06509481370449066,
"max": 2.5016250610351562,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1473.4248046875,
"min": 8.13685131072998,
"max": 1533.237060546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7534075132594484,
"min": 1.974571337223053,
"max": 4.005359681421834,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2248.2911004424095,
"min": 246.82141715288162,
"max": 2495.3390815258026,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7534075132594484,
"min": 1.974571337223053,
"max": 4.005359681421834,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2248.2911004424095,
"min": 246.82141715288162,
"max": 2495.3390815258026,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015170678576719688,
"min": 0.012810706735369118,
"max": 0.019864991551730785,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.045512035730159064,
"min": 0.025621413470738236,
"max": 0.05917892977425557,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06177259820202987,
"min": 0.022864542560031016,
"max": 0.06466928143054247,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1853177946060896,
"min": 0.04572908512006203,
"max": 0.18851040614147982,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.840398719899996e-06,
"min": 3.840398719899996e-06,
"max": 0.00029535330154889993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1521196159699988e-05,
"min": 1.1521196159699988e-05,
"max": 0.0008438554687148498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012801,
"min": 0.1012801,
"max": 0.19845110000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038403,
"min": 0.20769090000000007,
"max": 0.5812851499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.387698999999996e-05,
"min": 7.387698999999996e-05,
"max": 0.00492270989,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022163096999999987,
"min": 0.00022163096999999987,
"max": 0.014066128985000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699541904",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699544453"
},
"total": 2548.9840381860004,
"count": 1,
"self": 0.49057761000040045,
"children": {
"run_training.setup": {
"total": 0.04734232200007682,
"count": 1,
"self": 0.04734232200007682
},
"TrainerController.start_learning": {
"total": 2548.446118254,
"count": 1,
"self": 4.632292095946468,
"children": {
"TrainerController._reset_env": {
"total": 10.270430169000065,
"count": 1,
"self": 10.270430169000065
},
"TrainerController.advance": {
"total": 2533.4422995130535,
"count": 233030,
"self": 4.794425231015339,
"children": {
"env_step": {
"total": 1993.4267956580684,
"count": 233030,
"self": 1645.4281736452047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 345.10790146194563,
"count": 233030,
"self": 17.44486410497734,
"children": {
"TorchPolicy.evaluate": {
"total": 327.6630373569683,
"count": 222908,
"self": 327.6630373569683
}
}
},
"workers": {
"total": 2.890720550918104,
"count": 233030,
"self": 0.0,
"children": {
"worker_root": {
"total": 2540.9853053789934,
"count": 233030,
"is_parallel": true,
"self": 1193.9461645030094,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009380649999002344,
"count": 1,
"is_parallel": true,
"self": 0.000236407999864241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007016570000359934,
"count": 2,
"is_parallel": true,
"self": 0.0007016570000359934
}
}
},
"UnityEnvironment.step": {
"total": 0.030905327000027683,
"count": 1,
"is_parallel": true,
"self": 0.00032309500011251657,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023431699992215727,
"count": 1,
"is_parallel": true,
"self": 0.00023431699992215727
},
"communicator.exchange": {
"total": 0.029642856999998912,
"count": 1,
"is_parallel": true,
"self": 0.029642856999998912
},
"steps_from_proto": {
"total": 0.0007050579999940965,
"count": 1,
"is_parallel": true,
"self": 0.0001972299999124516,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005078280000816449,
"count": 2,
"is_parallel": true,
"self": 0.0005078280000816449
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1347.039140875984,
"count": 233029,
"is_parallel": true,
"self": 42.39428583212043,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.693177397937,
"count": 233029,
"is_parallel": true,
"self": 87.693177397937
},
"communicator.exchange": {
"total": 1121.838652780965,
"count": 233029,
"is_parallel": true,
"self": 1121.838652780965
},
"steps_from_proto": {
"total": 95.11302486496152,
"count": 233029,
"is_parallel": true,
"self": 33.605799494968096,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.50722536999342,
"count": 466058,
"is_parallel": true,
"self": 61.50722536999342
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 535.2210786239698,
"count": 233030,
"self": 6.643071083993391,
"children": {
"process_trajectory": {
"total": 162.32843653697478,
"count": 233030,
"self": 161.05511531197385,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2733212250009274,
"count": 10,
"self": 1.2733212250009274
}
}
},
"_update_policy": {
"total": 366.2495710030016,
"count": 97,
"self": 300.61037474199975,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.63919626100187,
"count": 2910,
"self": 65.63919626100187
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.830002116155811e-07,
"count": 1,
"self": 8.830002116155811e-07
},
"TrainerController._save_models": {
"total": 0.10109559299962712,
"count": 1,
"self": 0.001960318999408628,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09913527400021849,
"count": 1,
"self": 0.09913527400021849
}
}
}
}
}
}
}