{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407649040222168,
"min": 1.407644271850586,
"max": 1.4290574789047241,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70323.328125,
"min": 67245.1640625,
"max": 79138.953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 119.85576923076923,
"min": 97.59288537549408,
"max": 429.017094017094,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49860.0,
"min": 48909.0,
"max": 50195.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999535.0,
"min": 49941.0,
"max": 1999535.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999535.0,
"min": 49941.0,
"max": 1999535.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.272613286972046,
"min": 0.037908460944890976,
"max": 2.3531789779663086,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 945.4071044921875,
"min": 4.39738130569458,
"max": 1149.66259765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.433997853014332,
"min": 1.8602386440696388,
"max": 3.9078905119782403,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1428.543106853962,
"min": 215.7876827120781,
"max": 1911.7222979664803,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.433997853014332,
"min": 1.8602386440696388,
"max": 3.9078905119782403,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1428.543106853962,
"min": 215.7876827120781,
"max": 1911.7222979664803,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018116674137248386,
"min": 0.013416410597468106,
"max": 0.01943851451441232,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03623334827449677,
"min": 0.026832821194936213,
"max": 0.05831554354323695,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04177611644069354,
"min": 0.021771889148900908,
"max": 0.05754196556905905,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08355223288138708,
"min": 0.043543778297801816,
"max": 0.17262589670717715,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.3890985370000095e-06,
"min": 4.3890985370000095e-06,
"max": 0.00029530890156369993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.778197074000019e-06,
"min": 8.778197074000019e-06,
"max": 0.0008436631687789499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10146300000000003,
"min": 0.10146300000000003,
"max": 0.1984363,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20292600000000005,
"min": 0.20292600000000005,
"max": 0.5812210500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.300370000000016e-05,
"min": 8.300370000000016e-05,
"max": 0.00492197137,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016600740000000032,
"min": 0.00016600740000000032,
"max": 0.014062930395000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676175045",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676177297"
},
"total": 2251.946930351,
"count": 1,
"self": 0.39337947600006373,
"children": {
"run_training.setup": {
"total": 0.10986742199997934,
"count": 1,
"self": 0.10986742199997934
},
"TrainerController.start_learning": {
"total": 2251.443683453,
"count": 1,
"self": 4.024353639927995,
"children": {
"TrainerController._reset_env": {
"total": 10.75338305899993,
"count": 1,
"self": 10.75338305899993
},
"TrainerController.advance": {
"total": 2236.560571698072,
"count": 231138,
"self": 4.296630548023586,
"children": {
"env_step": {
"total": 1739.1402950400018,
"count": 231138,
"self": 1451.558971966957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.8635280880876,
"count": 231138,
"self": 15.144964609060594,
"children": {
"TorchPolicy.evaluate": {
"total": 269.718563479027,
"count": 223058,
"self": 68.25518328718272,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.4633801918443,
"count": 223058,
"self": 201.4633801918443
}
}
}
}
},
"workers": {
"total": 2.7177949849573224,
"count": 231138,
"self": 0.0,
"children": {
"worker_root": {
"total": 2243.4178333100767,
"count": 231138,
"is_parallel": true,
"self": 1067.937799511067,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022374320000153602,
"count": 1,
"is_parallel": true,
"self": 0.00036884700000427983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018685850000110804,
"count": 2,
"is_parallel": true,
"self": 0.0018685850000110804
}
}
},
"UnityEnvironment.step": {
"total": 0.027226506000033623,
"count": 1,
"is_parallel": true,
"self": 0.0002185580000286791,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019716300005256926,
"count": 1,
"is_parallel": true,
"self": 0.00019716300005256926
},
"communicator.exchange": {
"total": 0.026239142999997966,
"count": 1,
"is_parallel": true,
"self": 0.026239142999997966
},
"steps_from_proto": {
"total": 0.0005716419999544087,
"count": 1,
"is_parallel": true,
"self": 0.0002168129999517987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00035482900000261,
"count": 2,
"is_parallel": true,
"self": 0.00035482900000261
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1175.4800337990098,
"count": 231137,
"is_parallel": true,
"self": 37.14364117497007,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.70449254813059,
"count": 231137,
"is_parallel": true,
"self": 75.70449254813059
},
"communicator.exchange": {
"total": 973.5536027639611,
"count": 231137,
"is_parallel": true,
"self": 973.5536027639611
},
"steps_from_proto": {
"total": 89.07829731194806,
"count": 231137,
"is_parallel": true,
"self": 36.15981818794853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.91847912399953,
"count": 462274,
"is_parallel": true,
"self": 52.91847912399953
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 493.12364611004693,
"count": 231138,
"self": 6.495300968949664,
"children": {
"process_trajectory": {
"total": 150.84796701109985,
"count": 231138,
"self": 149.63711089809976,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2108561130000908,
"count": 10,
"self": 1.2108561130000908
}
}
},
"_update_policy": {
"total": 335.7803781299974,
"count": 96,
"self": 280.2423775489809,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.53800058101649,
"count": 2880,
"self": 55.53800058101649
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.100000741251279e-07,
"count": 1,
"self": 8.100000741251279e-07
},
"TrainerController._save_models": {
"total": 0.10537424599988299,
"count": 1,
"self": 0.00195911899982093,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10341512700006206,
"count": 1,
"self": 0.10341512700006206
}
}
}
}
}
}
}