{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37276285886764526,
"min": 0.37276285886764526,
"max": 1.4964677095413208,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11141.13671875,
"min": 11141.13671875,
"max": 45396.84375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989952.0,
"min": 29952.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989952.0,
"min": 29952.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5944133996963501,
"min": -0.10207769274711609,
"max": 0.6730814576148987,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.03016662597656,
"min": -24.702800750732422,
"max": 190.4820556640625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.008078553713858128,
"min": -0.016677310690283775,
"max": 0.4706823229789734,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.270073652267456,
"min": -4.769711017608643,
"max": 111.55171203613281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06728277978485465,
"min": 0.06505929724254132,
"max": 0.07452829490771773,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9419589169879652,
"min": 0.5136183031755045,
"max": 1.0500602968865733,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018761063877261543,
"min": 0.0006374082541119545,
"max": 0.018761063877261543,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2626548942816616,
"min": 0.007648899049343454,
"max": 0.276255791715812,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.241176157735716e-06,
"min": 7.241176157735716e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010137646620830002,
"min": 0.00010137646620830002,
"max": 0.0035077334307555986,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241369285714286,
"min": 0.10241369285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4337917,
"min": 1.3886848,
"max": 2.5692443999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025112791642857145,
"min": 0.00025112791642857145,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035157908300000006,
"min": 0.0035157908300000006,
"max": 0.11694751556,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008098416961729527,
"min": 0.008098416961729527,
"max": 0.3290998041629791,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11337783932685852,
"min": 0.11337783932685852,
"max": 2.3036985397338867,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.37254901960785,
"min": 269.3980582524272,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30026.0,
"min": 15984.0,
"max": 32883.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6637504806022834,
"min": -1.0000000521540642,
"max": 1.6982361748105004,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 168.0387985408306,
"min": -30.754001662135124,
"max": 178.31479835510254,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6637504806022834,
"min": -1.0000000521540642,
"max": 1.6982361748105004,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 168.0387985408306,
"min": -30.754001662135124,
"max": 178.31479835510254,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02470995312024111,
"min": 0.024618531409518238,
"max": 6.964528012089431,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.495705265144352,
"min": 2.495705265144352,
"max": 111.4324481934309,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685326692",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685328930"
},
"total": 2237.7078220759995,
"count": 1,
"self": 0.4904277469995577,
"children": {
"run_training.setup": {
"total": 0.038170413999978337,
"count": 1,
"self": 0.038170413999978337
},
"TrainerController.start_learning": {
"total": 2237.179223915,
"count": 1,
"self": 1.3083293730387595,
"children": {
"TrainerController._reset_env": {
"total": 3.952754061000178,
"count": 1,
"self": 3.952754061000178
},
"TrainerController.advance": {
"total": 2231.827563474961,
"count": 64012,
"self": 1.345359446916973,
"children": {
"env_step": {
"total": 1592.200334197016,
"count": 64012,
"self": 1484.9756074140796,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.45573378092581,
"count": 64012,
"self": 4.667062900914971,
"children": {
"TorchPolicy.evaluate": {
"total": 101.78867088001084,
"count": 62567,
"self": 101.78867088001084
}
}
},
"workers": {
"total": 0.7689930020105749,
"count": 64012,
"self": 0.0,
"children": {
"worker_root": {
"total": 2232.4386428889993,
"count": 64012,
"is_parallel": true,
"self": 859.2854925020395,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001852999999982785,
"count": 1,
"is_parallel": true,
"self": 0.0005828850000852981,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001270114999897487,
"count": 8,
"is_parallel": true,
"self": 0.001270114999897487
}
}
},
"UnityEnvironment.step": {
"total": 0.04779091600016727,
"count": 1,
"is_parallel": true,
"self": 0.0005822109999371605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005043390001446824,
"count": 1,
"is_parallel": true,
"self": 0.0005043390001446824
},
"communicator.exchange": {
"total": 0.04492534600012732,
"count": 1,
"is_parallel": true,
"self": 0.04492534600012732
},
"steps_from_proto": {
"total": 0.0017790199999581091,
"count": 1,
"is_parallel": true,
"self": 0.00037568999960058136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014033300003575278,
"count": 8,
"is_parallel": true,
"self": 0.0014033300003575278
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1373.1531503869599,
"count": 64011,
"is_parallel": true,
"self": 31.980756223933895,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.839201045034542,
"count": 64011,
"is_parallel": true,
"self": 22.839201045034542
},
"communicator.exchange": {
"total": 1220.303337605945,
"count": 64011,
"is_parallel": true,
"self": 1220.303337605945
},
"steps_from_proto": {
"total": 98.02985551204642,
"count": 64011,
"is_parallel": true,
"self": 19.654176114041547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.37567939800488,
"count": 512088,
"is_parallel": true,
"self": 78.37567939800488
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 638.2818698310282,
"count": 64012,
"self": 2.539241273027301,
"children": {
"process_trajectory": {
"total": 110.13453325400087,
"count": 64012,
"self": 109.86244229400063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27209096000024147,
"count": 2,
"self": 0.27209096000024147
}
}
},
"_update_policy": {
"total": 525.608095304,
"count": 450,
"self": 337.0010840809939,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.6070112230061,
"count": 22773,
"self": 188.6070112230061
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.589998626324814e-07,
"count": 1,
"self": 9.589998626324814e-07
},
"TrainerController._save_models": {
"total": 0.0905760470000132,
"count": 1,
"self": 0.0013563980000981246,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08921964899991508,
"count": 1,
"self": 0.08921964899991508
}
}
}
}
}
}
}