{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4087860584259033,
"min": 1.4087860584259033,
"max": 1.4292200803756714,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71085.9375,
"min": 68517.4375,
"max": 77614.5703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.61048689138576,
"min": 82.25956738768718,
"max": 439.9561403508772,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49454.0,
"min": 48809.0,
"max": 50216.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999977.0,
"min": 49930.0,
"max": 1999977.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999977.0,
"min": 49930.0,
"max": 1999977.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.383768081665039,
"min": 0.049602434039115906,
"max": 2.4546496868133545,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1272.93212890625,
"min": 5.605074882507324,
"max": 1426.15625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6271619608116508,
"min": 1.8157260858379634,
"max": 3.952645627064819,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1936.9044870734215,
"min": 205.17704769968987,
"max": 2325.8180208206177,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6271619608116508,
"min": 1.8157260858379634,
"max": 3.952645627064819,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1936.9044870734215,
"min": 205.17704769968987,
"max": 2325.8180208206177,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015090832402145477,
"min": 0.012912605788733345,
"max": 0.01981800891501027,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04527249720643643,
"min": 0.02582521157746669,
"max": 0.05945402674503081,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05420269643266996,
"min": 0.022209505178034307,
"max": 0.05881228898134497,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16260808929800988,
"min": 0.044419010356068614,
"max": 0.1764368669440349,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5657488114499974e-06,
"min": 3.5657488114499974e-06,
"max": 0.00029530800156399995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0697246434349992e-05,
"min": 1.0697246434349992e-05,
"max": 0.0008439991686669499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118854999999999,
"min": 0.10118854999999999,
"max": 0.19843599999999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30356564999999996,
"min": 0.20751475000000003,
"max": 0.5813330500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.930864499999995e-05,
"min": 6.930864499999995e-05,
"max": 0.004921956399999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020792593499999987,
"min": 0.00020792593499999987,
"max": 0.014068519195,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676851570",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676856027"
},
"total": 4456.309646752,
"count": 1,
"self": 1.0496599469997818,
"children": {
"run_training.setup": {
"total": 0.13182788099999243,
"count": 1,
"self": 0.13182788099999243
},
"TrainerController.start_learning": {
"total": 4455.128158924,
"count": 1,
"self": 7.593636891954702,
"children": {
"TrainerController._reset_env": {
"total": 7.228833889999976,
"count": 1,
"self": 7.228833889999976
},
"TrainerController.advance": {
"total": 4440.113941238045,
"count": 232417,
"self": 8.477564169148536,
"children": {
"env_step": {
"total": 2784.307463514896,
"count": 232417,
"self": 2357.0348645579306,
"children": {
"SubprocessEnvManager._take_step": {
"total": 422.18586435301074,
"count": 232417,
"self": 26.319140903125856,
"children": {
"TorchPolicy.evaluate": {
"total": 395.8667234498849,
"count": 223028,
"self": 56.81604120090037,
"children": {
"TorchPolicy.sample_actions": {
"total": 339.0506822489845,
"count": 223028,
"self": 339.0506822489845
}
}
}
}
},
"workers": {
"total": 5.086734603954426,
"count": 232417,
"self": 0.0,
"children": {
"worker_root": {
"total": 4439.520794527961,
"count": 232417,
"is_parallel": true,
"self": 2555.57222952591,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006946400000003905,
"count": 1,
"is_parallel": true,
"self": 0.0005393490000074053,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0064070509999965,
"count": 2,
"is_parallel": true,
"self": 0.0064070509999965
}
}
},
"UnityEnvironment.step": {
"total": 0.04887942200002726,
"count": 1,
"is_parallel": true,
"self": 0.0003871430000685905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020978800000648334,
"count": 1,
"is_parallel": true,
"self": 0.00020978800000648334
},
"communicator.exchange": {
"total": 0.046751306999965436,
"count": 1,
"is_parallel": true,
"self": 0.046751306999965436
},
"steps_from_proto": {
"total": 0.0015311839999867516,
"count": 1,
"is_parallel": true,
"self": 0.0003403279999361075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001190856000050644,
"count": 2,
"is_parallel": true,
"self": 0.001190856000050644
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1883.9485650020504,
"count": 232416,
"is_parallel": true,
"self": 58.35236138793198,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.46775879405823,
"count": 232416,
"is_parallel": true,
"self": 87.46775879405823
},
"communicator.exchange": {
"total": 1598.4836456881576,
"count": 232416,
"is_parallel": true,
"self": 1598.4836456881576
},
"steps_from_proto": {
"total": 139.64479913190252,
"count": 232416,
"is_parallel": true,
"self": 53.394614276875416,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.2501848550271,
"count": 464832,
"is_parallel": true,
"self": 86.2501848550271
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1647.3289135540012,
"count": 232417,
"self": 12.62272259993415,
"children": {
"process_trajectory": {
"total": 264.3031866520655,
"count": 232417,
"self": 262.83021821706575,
"children": {
"RLTrainer._checkpoint": {
"total": 1.472968434999757,
"count": 10,
"self": 1.472968434999757
}
}
},
"_update_policy": {
"total": 1370.4030043020016,
"count": 97,
"self": 363.4393697040016,
"children": {
"TorchPPOOptimizer.update": {
"total": 1006.963634598,
"count": 2910,
"self": 1006.963634598
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5470004655071534e-06,
"count": 1,
"self": 1.5470004655071534e-06
},
"TrainerController._save_models": {
"total": 0.19174535699949047,
"count": 1,
"self": 0.0038765659992350265,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18786879100025544,
"count": 1,
"self": 0.18786879100025544
}
}
}
}
}
}
}