{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4042478799819946,
"min": 1.4042478799819946,
"max": 1.425980567932129,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69663.3359375,
"min": 67513.328125,
"max": 76349.5625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.21942446043165,
"min": 78.44992050874404,
"max": 400.424,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49050.0,
"min": 48842.0,
"max": 50071.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49538.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49538.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.447281837463379,
"min": 0.025695649906992912,
"max": 2.5372462272644043,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1360.688720703125,
"min": 3.18626070022583,
"max": 1554.9010009765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.773182193259541,
"min": 1.8520825952291489,
"max": 4.024492117767459,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2097.889299452305,
"min": 229.65824180841446,
"max": 2450.9156997203827,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.773182193259541,
"min": 1.8520825952291489,
"max": 4.024492117767459,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2097.889299452305,
"min": 229.65824180841446,
"max": 2450.9156997203827,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015080716383898915,
"min": 0.01448974387738215,
"max": 0.020287011452859993,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04524214915169675,
"min": 0.029623171131728063,
"max": 0.06086103435857998,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05598335050874287,
"min": 0.02254077590381106,
"max": 0.06075451200207074,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1679500515262286,
"min": 0.04508155180762212,
"max": 0.1798377410819133,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8130487290166707e-06,
"min": 3.8130487290166707e-06,
"max": 0.000295317226560925,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1439146187050012e-05,
"min": 1.1439146187050012e-05,
"max": 0.0008441976186007998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127098333333336,
"min": 0.10127098333333336,
"max": 0.19843907500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038129500000001,
"min": 0.20767315000000003,
"max": 0.5813992,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.342206833333339e-05,
"min": 7.342206833333339e-05,
"max": 0.004922109842500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022026620500000018,
"min": 0.00022026620500000018,
"max": 0.014071820080000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673339911",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673342092"
},
"total": 2181.166391285,
"count": 1,
"self": 0.3892307150003944,
"children": {
"run_training.setup": {
"total": 0.10566819899997881,
"count": 1,
"self": 0.10566819899997881
},
"TrainerController.start_learning": {
"total": 2180.6714923709997,
"count": 1,
"self": 3.724941298981321,
"children": {
"TrainerController._reset_env": {
"total": 7.36774900599994,
"count": 1,
"self": 7.36774900599994
},
"TrainerController.advance": {
"total": 2169.454374819018,
"count": 233079,
"self": 3.9473523728252076,
"children": {
"env_step": {
"total": 1703.0049505130914,
"count": 233079,
"self": 1430.9249859780693,
"children": {
"SubprocessEnvManager._take_step": {
"total": 269.60442605303183,
"count": 233079,
"self": 13.965629689035836,
"children": {
"TorchPolicy.evaluate": {
"total": 255.638796363996,
"count": 222954,
"self": 64.59134923909585,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.04744712490015,
"count": 222954,
"self": 191.04744712490015
}
}
}
}
},
"workers": {
"total": 2.475538481990384,
"count": 233079,
"self": 0.0,
"children": {
"worker_root": {
"total": 2173.0696061969907,
"count": 233079,
"is_parallel": true,
"self": 994.0661345670808,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020750209999960134,
"count": 1,
"is_parallel": true,
"self": 0.0003132800000003044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001761740999995709,
"count": 2,
"is_parallel": true,
"self": 0.001761740999995709
}
}
},
"UnityEnvironment.step": {
"total": 0.02677217500001916,
"count": 1,
"is_parallel": true,
"self": 0.0002853189998859307,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002002070000344247,
"count": 1,
"is_parallel": true,
"self": 0.0002002070000344247
},
"communicator.exchange": {
"total": 0.025608613000031255,
"count": 1,
"is_parallel": true,
"self": 0.025608613000031255
},
"steps_from_proto": {
"total": 0.00067803600006755,
"count": 1,
"is_parallel": true,
"self": 0.00022627400005603704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000451762000011513,
"count": 2,
"is_parallel": true,
"self": 0.000451762000011513
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1179.0034716299099,
"count": 233078,
"is_parallel": true,
"self": 34.0772023279776,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.03869257399344,
"count": 233078,
"is_parallel": true,
"self": 76.03869257399344
},
"communicator.exchange": {
"total": 977.3621011999588,
"count": 233078,
"is_parallel": true,
"self": 977.3621011999588
},
"steps_from_proto": {
"total": 91.52547552798,
"count": 233078,
"is_parallel": true,
"self": 37.76106898913372,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.76440653884629,
"count": 466156,
"is_parallel": true,
"self": 53.76440653884629
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.5020719331013,
"count": 233079,
"self": 5.8266957150663075,
"children": {
"process_trajectory": {
"total": 147.11544563603547,
"count": 233079,
"self": 145.80312004703546,
"children": {
"RLTrainer._checkpoint": {
"total": 1.312325589000011,
"count": 10,
"self": 1.312325589000011
}
}
},
"_update_policy": {
"total": 309.55993058199954,
"count": 97,
"self": 257.0280057629918,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.53192481900771,
"count": 2910,
"self": 52.53192481900771
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.050001270021312e-07,
"count": 1,
"self": 8.050001270021312e-07
},
"TrainerController._save_models": {
"total": 0.12442644200018549,
"count": 1,
"self": 0.0019689910000124655,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12245745100017302,
"count": 1,
"self": 0.12245745100017302
}
}
}
}
}
}
}