{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43915560841560364,
"min": 0.43915560841560364,
"max": 1.4234611988067627,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13146.5625,
"min": 13146.5625,
"max": 43182.1171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.516571044921875,
"min": -0.08865094184875488,
"max": 0.5249823331832886,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 143.60675048828125,
"min": -21.276226043701172,
"max": 145.42010498046875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.057158518582582474,
"min": -0.017699720337986946,
"max": 0.4814152419567108,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 15.890068054199219,
"min": -4.690425872802734,
"max": 114.09541320800781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06509863779776626,
"min": 0.06421051919480678,
"max": 0.07464485454703958,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9764795669664939,
"min": 0.5114945926577731,
"max": 1.0511575324247437,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01553486914943076,
"min": 0.0001456282720922591,
"max": 0.017217206227498887,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2330230372414614,
"min": 0.002038795809291627,
"max": 0.2410408871849844,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.491377502906662e-06,
"min": 7.491377502906662e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011237066254359994,
"min": 0.00011237066254359994,
"max": 0.0035076620307794,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249709333333334,
"min": 0.10249709333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374564000000002,
"min": 1.3691136000000002,
"max": 2.5692206,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025945962399999986,
"min": 0.00025945962399999986,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003891894359999998,
"min": 0.003891894359999998,
"max": 0.11694513794,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012437420897185802,
"min": 0.012437420897185802,
"max": 0.44716331362724304,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18656131625175476,
"min": 0.18006260693073273,
"max": 3.130143165588379,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 357.6588235294118,
"min": 354.7674418604651,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30401.0,
"min": 15984.0,
"max": 32294.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6165237897740943,
"min": -1.0000000521540642,
"max": 1.620530843918706,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 135.78799834102392,
"min": -32.000001668930054,
"max": 135.78799834102392,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6165237897740943,
"min": -1.0000000521540642,
"max": 1.620530843918706,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 135.78799834102392,
"min": -32.000001668930054,
"max": 135.78799834102392,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04647597727792648,
"min": 0.04647597727792648,
"max": 9.092697262763977,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9039820913458243,
"min": 3.9039820913458243,
"max": 145.48315620422363,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686118762",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686121017"
},
"total": 2255.1028320309997,
"count": 1,
"self": 0.47591283099973225,
"children": {
"run_training.setup": {
"total": 0.0366336299998693,
"count": 1,
"self": 0.0366336299998693
},
"TrainerController.start_learning": {
"total": 2254.59028557,
"count": 1,
"self": 1.5666082909478973,
"children": {
"TrainerController._reset_env": {
"total": 5.063071235999814,
"count": 1,
"self": 5.063071235999814
},
"TrainerController.advance": {
"total": 2247.8638521460525,
"count": 63705,
"self": 1.5537499131901313,
"children": {
"env_step": {
"total": 1588.766362417879,
"count": 63705,
"self": 1472.0924524337556,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.77020215704897,
"count": 63705,
"self": 4.9658996420444055,
"children": {
"TorchPolicy.evaluate": {
"total": 110.80430251500457,
"count": 62555,
"self": 110.80430251500457
}
}
},
"workers": {
"total": 0.9037078270744132,
"count": 63705,
"self": 0.0,
"children": {
"worker_root": {
"total": 2248.993563313996,
"count": 63705,
"is_parallel": true,
"self": 896.7022219069768,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025155490002362058,
"count": 1,
"is_parallel": true,
"self": 0.0006804409999858763,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018351080002503295,
"count": 8,
"is_parallel": true,
"self": 0.0018351080002503295
}
}
},
"UnityEnvironment.step": {
"total": 0.07674503699990964,
"count": 1,
"is_parallel": true,
"self": 0.0006662979999418894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005023429998800566,
"count": 1,
"is_parallel": true,
"self": 0.0005023429998800566
},
"communicator.exchange": {
"total": 0.07351763499991648,
"count": 1,
"is_parallel": true,
"self": 0.07351763499991648
},
"steps_from_proto": {
"total": 0.0020587610001712164,
"count": 1,
"is_parallel": true,
"self": 0.00042051599939441076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016382450007768057,
"count": 8,
"is_parallel": true,
"self": 0.0016382450007768057
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1352.2913414070194,
"count": 63704,
"is_parallel": true,
"self": 34.55258667493763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.889135044054456,
"count": 63704,
"is_parallel": true,
"self": 22.889135044054456
},
"communicator.exchange": {
"total": 1189.6145849770123,
"count": 63704,
"is_parallel": true,
"self": 1189.6145849770123
},
"steps_from_proto": {
"total": 105.23503471101503,
"count": 63704,
"is_parallel": true,
"self": 21.060295321027752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.17473938998728,
"count": 509632,
"is_parallel": true,
"self": 84.17473938998728
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 657.5437398149834,
"count": 63705,
"self": 2.8623747060014466,
"children": {
"process_trajectory": {
"total": 109.40628894397696,
"count": 63705,
"self": 109.16081514297684,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2454738010001165,
"count": 2,
"self": 0.2454738010001165
}
}
},
"_update_policy": {
"total": 545.275076165005,
"count": 447,
"self": 351.2921764690236,
"children": {
"TorchPPOOptimizer.update": {
"total": 193.98289969598136,
"count": 22803,
"self": 193.98289969598136
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.580000212532468e-07,
"count": 1,
"self": 8.580000212532468e-07
},
"TrainerController._save_models": {
"total": 0.0967530389998501,
"count": 1,
"self": 0.0015374720005638665,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09521556699928624,
"count": 1,
"self": 0.09521556699928624
}
}
}
}
}
}
}