{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3766883611679077,
"min": 1.3766883611679077,
"max": 1.4273005723953247,
"count": 80
},
"Huggy.Policy.Entropy.sum": {
"value": 69269.453125,
"min": 67396.265625,
"max": 78155.953125,
"count": 80
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 69.95833333333333,
"min": 65.27718832891247,
"max": 380.8625954198473,
"count": 80
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48691.0,
"min": 48621.0,
"max": 50215.0,
"count": 80
},
"Huggy.Step.mean": {
"value": 3999898.0,
"min": 49477.0,
"max": 3999898.0,
"count": 80
},
"Huggy.Step.sum": {
"value": 3999898.0,
"min": 49477.0,
"max": 3999898.0,
"count": 80
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.528062582015991,
"min": 0.02807590924203396,
"max": 2.6201882362365723,
"count": 80
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1759.531494140625,
"min": 3.6498682498931885,
"max": 1899.295166015625,
"count": 80
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.855073126739469,
"min": 1.8250049696518824,
"max": 4.106077913529363,
"count": 80
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2683.1308962106705,
"min": 237.25064605474472,
"max": 2925.829711675644,
"count": 80
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.855073126739469,
"min": 1.8250049696518824,
"max": 4.106077913529363,
"count": 80
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2683.1308962106705,
"min": 237.25064605474472,
"max": 2925.829711675644,
"count": 80
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01652396729114116,
"min": 0.012445210263669853,
"max": 0.01972373293756391,
"count": 80
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03304793458228232,
"min": 0.028163065307307986,
"max": 0.055698246351433535,
"count": 80
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.052777691495915255,
"min": 0.023351419748117525,
"max": 0.060110583528876306,
"count": 80
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10555538299183051,
"min": 0.04670283949623505,
"max": 0.17375112747152646,
"count": 80
},
"Huggy.Policy.LearningRate.mean": {
"value": 1.5752494749499969e-06,
"min": 1.5752494749499969e-06,
"max": 0.0002976801757732749,
"count": 80
},
"Huggy.Policy.LearningRate.sum": {
"value": 3.1504989498999937e-06,
"min": 3.1504989498999937e-06,
"max": 0.000872139984286675,
"count": 80
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10052505000000003,
"min": 0.10052505000000003,
"max": 0.199226725,
"count": 80
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20105010000000006,
"min": 0.20105010000000006,
"max": 0.5907133250000001,
"count": 80
},
"Huggy.Policy.Beta.mean": {
"value": 3.619999499999996e-05,
"min": 3.619999499999996e-05,
"max": 0.0049614135775000005,
"count": 80
},
"Huggy.Policy.Beta.sum": {
"value": 7.239998999999992e-05,
"min": 7.239998999999992e-05,
"max": 0.014536594917500001,
"count": 80
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671889796",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671894499"
},
"total": 4702.712752505,
"count": 1,
"self": 0.4345088649997706,
"children": {
"run_training.setup": {
"total": 0.10919445499996527,
"count": 1,
"self": 0.10919445499996527
},
"TrainerController.start_learning": {
"total": 4702.169049185,
"count": 1,
"self": 8.308636300022044,
"children": {
"TrainerController._reset_env": {
"total": 9.136484830999962,
"count": 1,
"self": 9.136484830999962
},
"TrainerController.advance": {
"total": 4684.598155507979,
"count": 469751,
"self": 8.830049568807226,
"children": {
"env_step": {
"total": 3690.812493628232,
"count": 469751,
"self": 3093.4621853894223,
"children": {
"SubprocessEnvManager._take_step": {
"total": 591.8068855997124,
"count": 469751,
"self": 30.219885089532568,
"children": {
"TorchPolicy.evaluate": {
"total": 561.5870005101798,
"count": 445962,
"self": 137.87527316612454,
"children": {
"TorchPolicy.sample_actions": {
"total": 423.71172734405525,
"count": 445962,
"self": 423.71172734405525
}
}
}
}
},
"workers": {
"total": 5.543422639097344,
"count": 469751,
"self": 0.0,
"children": {
"worker_root": {
"total": 4685.990817634978,
"count": 469751,
"is_parallel": true,
"self": 2146.1005617268665,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002275258999986818,
"count": 1,
"is_parallel": true,
"self": 0.0003367419999449339,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019385170000418839,
"count": 2,
"is_parallel": true,
"self": 0.0019385170000418839
}
}
},
"UnityEnvironment.step": {
"total": 0.02951544399991235,
"count": 1,
"is_parallel": true,
"self": 0.0003099169998677098,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019954800006871665,
"count": 1,
"is_parallel": true,
"self": 0.00019954800006871665
},
"communicator.exchange": {
"total": 0.02818938099994739,
"count": 1,
"is_parallel": true,
"self": 0.02818938099994739
},
"steps_from_proto": {
"total": 0.0008165980000285344,
"count": 1,
"is_parallel": true,
"self": 0.0002823190001208786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005342789999076558,
"count": 2,
"is_parallel": true,
"self": 0.0005342789999076558
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2539.8902559081116,
"count": 469750,
"is_parallel": true,
"self": 71.84488918411307,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 168.13284282313384,
"count": 469750,
"is_parallel": true,
"self": 168.13284282313384
},
"communicator.exchange": {
"total": 2098.712469001878,
"count": 469750,
"is_parallel": true,
"self": 2098.712469001878
},
"steps_from_proto": {
"total": 201.20005489898665,
"count": 469750,
"is_parallel": true,
"self": 87.10449242603033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 114.09556247295632,
"count": 939500,
"is_parallel": true,
"self": 114.09556247295632
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 984.95561231094,
"count": 469751,
"self": 12.785021479939815,
"children": {
"process_trajectory": {
"total": 341.2104331700061,
"count": 469751,
"self": 338.5625130980079,
"children": {
"RLTrainer._checkpoint": {
"total": 2.6479200719982146,
"count": 20,
"self": 2.6479200719982146
}
}
},
"_update_policy": {
"total": 630.960157660994,
"count": 194,
"self": 524.1272965709799,
"children": {
"TorchPPOOptimizer.update": {
"total": 106.83286109001415,
"count": 5820,
"self": 106.83286109001415
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1089996405644342e-06,
"count": 1,
"self": 1.1089996405644342e-06
},
"TrainerController._save_models": {
"total": 0.12577143700036686,
"count": 1,
"self": 0.003044043000045349,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12272739400032151,
"count": 1,
"self": 0.12272739400032151
}
}
}
}
}
}
}