{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4081486463546753,
"min": 1.4081486463546753,
"max": 1.4276885986328125,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72205.640625,
"min": 69166.609375,
"max": 77358.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.98217179902755,
"min": 79.70274636510501,
"max": 409.1626016260163,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49349.0,
"min": 49170.0,
"max": 50327.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999316.0,
"min": 49790.0,
"max": 1999316.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999316.0,
"min": 49790.0,
"max": 1999316.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.464808464050293,
"min": 0.21391651034355164,
"max": 2.5066566467285156,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1520.786865234375,
"min": 26.097814559936523,
"max": 1520.786865234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8918290418396118,
"min": 1.7857408500352845,
"max": 4.036255992295449,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2401.2585188150406,
"min": 217.8603837043047,
"max": 2401.2585188150406,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8918290418396118,
"min": 1.7857408500352845,
"max": 4.036255992295449,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2401.2585188150406,
"min": 217.8603837043047,
"max": 2401.2585188150406,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01651296861883667,
"min": 0.013643855596395346,
"max": 0.020636530473590636,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04953890585651,
"min": 0.027287711192790692,
"max": 0.0564199849429618,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.058663583795229596,
"min": 0.020565773174166677,
"max": 0.061604329571127894,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1759907513856888,
"min": 0.04113154634833335,
"max": 0.17997658923268317,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4466988511333333e-06,
"min": 3.4466988511333333e-06,
"max": 0.000295286626571125,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.03400965534e-05,
"min": 1.03400965534e-05,
"max": 0.0008441814186062001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114886666666667,
"min": 0.10114886666666667,
"max": 0.19842887500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034466,
"min": 0.20745020000000006,
"max": 0.5813938000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.732844666666666e-05,
"min": 6.732844666666666e-05,
"max": 0.0049216008625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020198533999999998,
"min": 0.00020198533999999998,
"max": 0.014071550620000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688284217",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688286745"
},
"total": 2527.467364223,
"count": 1,
"self": 0.4345750339998631,
"children": {
"run_training.setup": {
"total": 0.07368603200006874,
"count": 1,
"self": 0.07368603200006874
},
"TrainerController.start_learning": {
"total": 2526.9591031570003,
"count": 1,
"self": 4.388682963029623,
"children": {
"TrainerController._reset_env": {
"total": 5.461473549000175,
"count": 1,
"self": 5.461473549000175
},
"TrainerController.advance": {
"total": 2516.986419539972,
"count": 232210,
"self": 4.665889163023621,
"children": {
"env_step": {
"total": 1971.050500356945,
"count": 232210,
"self": 1653.5656767661335,
"children": {
"SubprocessEnvManager._take_step": {
"total": 314.5004626838595,
"count": 232210,
"self": 17.758932944828302,
"children": {
"TorchPolicy.evaluate": {
"total": 296.7415297390312,
"count": 222946,
"self": 296.7415297390312
}
}
},
"workers": {
"total": 2.984360906951906,
"count": 232210,
"self": 0.0,
"children": {
"worker_root": {
"total": 2518.9544928300916,
"count": 232210,
"is_parallel": true,
"self": 1166.8137290161465,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010517810001147154,
"count": 1,
"is_parallel": true,
"self": 0.00033078400042541034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007209969996893051,
"count": 2,
"is_parallel": true,
"self": 0.0007209969996893051
}
}
},
"UnityEnvironment.step": {
"total": 0.03408777399999963,
"count": 1,
"is_parallel": true,
"self": 0.00036323799986348604,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024146100008692883,
"count": 1,
"is_parallel": true,
"self": 0.00024146100008692883
},
"communicator.exchange": {
"total": 0.03271762300005321,
"count": 1,
"is_parallel": true,
"self": 0.03271762300005321
},
"steps_from_proto": {
"total": 0.000765451999996003,
"count": 1,
"is_parallel": true,
"self": 0.00023339899985330703,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000532053000142696,
"count": 2,
"is_parallel": true,
"self": 0.000532053000142696
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1352.1407638139451,
"count": 232209,
"is_parallel": true,
"self": 40.36696551392379,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.3445682769177,
"count": 232209,
"is_parallel": true,
"self": 85.3445682769177
},
"communicator.exchange": {
"total": 1127.3239632300981,
"count": 232209,
"is_parallel": true,
"self": 1127.3239632300981
},
"steps_from_proto": {
"total": 99.10526679300551,
"count": 232209,
"is_parallel": true,
"self": 37.74330889481098,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.36195789819453,
"count": 464418,
"is_parallel": true,
"self": 61.36195789819453
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 541.2700300200033,
"count": 232210,
"self": 6.682049359968232,
"children": {
"process_trajectory": {
"total": 142.25753929703592,
"count": 232210,
"self": 140.97136082503562,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2861784720003016,
"count": 10,
"self": 1.2861784720003016
}
}
},
"_update_policy": {
"total": 392.3304413629992,
"count": 97,
"self": 331.20253447797654,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.12790688502264,
"count": 2910,
"self": 61.12790688502264
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0169997040065937e-06,
"count": 1,
"self": 1.0169997040065937e-06
},
"TrainerController._save_models": {
"total": 0.12252608799917653,
"count": 1,
"self": 0.002120239999385376,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12040584799979115,
"count": 1,
"self": 0.12040584799979115
}
}
}
}
}
}
}