{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406259536743164,
"min": 1.406259536743164,
"max": 1.4298001527786255,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71070.953125,
"min": 67192.6875,
"max": 79608.1015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.8375,
"min": 88.34280639431617,
"max": 375.187969924812,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49842.0,
"min": 49164.0,
"max": 49921.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999607.0,
"min": 49550.0,
"max": 1999607.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999607.0,
"min": 49550.0,
"max": 1999607.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3261377811431885,
"min": 0.13574489951133728,
"max": 2.423707962036133,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1116.546142578125,
"min": 17.91832733154297,
"max": 1323.3446044921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.533173110584418,
"min": 1.7383253865621306,
"max": 3.888927292268715,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1695.9230930805206,
"min": 229.45895102620125,
"max": 2078.0930726528168,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.533173110584418,
"min": 1.7383253865621306,
"max": 3.888927292268715,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1695.9230930805206,
"min": 229.45895102620125,
"max": 2078.0930726528168,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018315091429232985,
"min": 0.013727532109805906,
"max": 0.019239620664787557,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05494527428769895,
"min": 0.027455064219611813,
"max": 0.055743372597741356,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05122981969681051,
"min": 0.02188951565573613,
"max": 0.061367499207456905,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15368945909043152,
"min": 0.04377903131147226,
"max": 0.18410249762237071,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.266798911099999e-06,
"min": 3.266798911099999e-06,
"max": 0.00029532330155890003,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.800396733299998e-06,
"min": 9.800396733299998e-06,
"max": 0.0008442393185868998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108889999999998,
"min": 0.10108889999999998,
"max": 0.19844109999999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30326669999999994,
"min": 0.20730525000000005,
"max": 0.5814131,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.433611000000002e-05,
"min": 6.433611000000002e-05,
"max": 0.0049222108900000006,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019300833000000006,
"min": 0.00019300833000000006,
"max": 0.01407251369,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695208371",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695210874"
},
"total": 2502.8018295439997,
"count": 1,
"self": 0.44234815999971033,
"children": {
"run_training.setup": {
"total": 0.06566901500002587,
"count": 1,
"self": 0.06566901500002587
},
"TrainerController.start_learning": {
"total": 2502.293812369,
"count": 1,
"self": 4.500375942912797,
"children": {
"TrainerController._reset_env": {
"total": 4.996839191000049,
"count": 1,
"self": 4.996839191000049
},
"TrainerController.advance": {
"total": 2492.669523439087,
"count": 231790,
"self": 4.620009810227202,
"children": {
"env_step": {
"total": 1926.7616455069883,
"count": 231790,
"self": 1625.3007979190234,
"children": {
"SubprocessEnvManager._take_step": {
"total": 298.43505598696686,
"count": 231790,
"self": 16.545508152974207,
"children": {
"TorchPolicy.evaluate": {
"total": 281.88954783399265,
"count": 222980,
"self": 281.88954783399265
}
}
},
"workers": {
"total": 3.025791600998218,
"count": 231790,
"self": 0.0,
"children": {
"worker_root": {
"total": 2494.7233527039466,
"count": 231790,
"is_parallel": true,
"self": 1167.382374542029,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010776249999935317,
"count": 1,
"is_parallel": true,
"self": 0.0003049300000839139,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007726949999096178,
"count": 2,
"is_parallel": true,
"self": 0.0007726949999096178
}
}
},
"UnityEnvironment.step": {
"total": 0.03400693499997942,
"count": 1,
"is_parallel": true,
"self": 0.0003703559999621575,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002374829999780559,
"count": 1,
"is_parallel": true,
"self": 0.0002374829999780559
},
"communicator.exchange": {
"total": 0.032589311000037924,
"count": 1,
"is_parallel": true,
"self": 0.032589311000037924
},
"steps_from_proto": {
"total": 0.0008097850000012841,
"count": 1,
"is_parallel": true,
"self": 0.00022993200002474623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005798529999765378,
"count": 2,
"is_parallel": true,
"self": 0.0005798529999765378
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1327.3409781619175,
"count": 231789,
"is_parallel": true,
"self": 40.634418869893125,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.4230300610401,
"count": 231789,
"is_parallel": true,
"self": 82.4230300610401
},
"communicator.exchange": {
"total": 1103.6502710000545,
"count": 231789,
"is_parallel": true,
"self": 1103.6502710000545
},
"steps_from_proto": {
"total": 100.63325823092953,
"count": 231789,
"is_parallel": true,
"self": 35.397105210954464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.23615301997506,
"count": 463578,
"is_parallel": true,
"self": 65.23615301997506
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 561.2878681218713,
"count": 231790,
"self": 6.753259892876713,
"children": {
"process_trajectory": {
"total": 138.048690026994,
"count": 231790,
"self": 136.60352156299416,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4451684639998348,
"count": 10,
"self": 1.4451684639998348
}
}
},
"_update_policy": {
"total": 416.48591820200056,
"count": 97,
"self": 355.7086488049962,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.777269397004375,
"count": 2910,
"self": 60.777269397004375
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0700000530050602e-06,
"count": 1,
"self": 1.0700000530050602e-06
},
"TrainerController._save_models": {
"total": 0.12707272600027864,
"count": 1,
"self": 0.002142632999948546,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12493009300033009,
"count": 1,
"self": 0.12493009300033009
}
}
}
}
}
}
}