{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3817181885242462,
"min": 0.3817181885242462,
"max": 1.462928295135498,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11439.3310546875,
"min": 11439.3310546875,
"max": 44379.39453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989876.0,
"min": 29952.0,
"max": 989876.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989876.0,
"min": 29952.0,
"max": 989876.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4439846873283386,
"min": -0.12145934253931046,
"max": 0.5775772929191589,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 119.431884765625,
"min": -29.39316177368164,
"max": 163.78372192382812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03619305416941643,
"min": -0.1592051237821579,
"max": 0.30137020349502563,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.735931396484375,
"min": -41.234127044677734,
"max": 72.63021850585938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07062915362338132,
"min": 0.06570433203006057,
"max": 0.07565576343783574,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9888081507273385,
"min": 0.5295903440648502,
"max": 1.0503451101637133,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014783945759744984,
"min": 0.0001980511182137315,
"max": 0.019633743017782698,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20697524063642977,
"min": 0.0025746645367785096,
"max": 0.27487240224895776,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.731847422750003e-06,
"min": 7.731847422750003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010824586391850004,
"min": 0.00010824586391850004,
"max": 0.0033756358747880993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257725,
"min": 0.10257725,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360815,
"min": 1.3886848,
"max": 2.4252119,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002674672750000001,
"min": 0.0002674672750000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003744541850000002,
"min": 0.003744541850000002,
"max": 0.11253866880999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013486322946846485,
"min": 0.013486322946846485,
"max": 0.46075740456581116,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18880851566791534,
"min": 0.18880851566791534,
"max": 3.225301742553711,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 413.75,
"min": 306.8333333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31445.0,
"min": 15984.0,
"max": 32967.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4283078695206266,
"min": -1.0000000521540642,
"max": 1.6723270535779495,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 108.55139808356762,
"min": -31.996801659464836,
"max": 160.54339714348316,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4283078695206266,
"min": -1.0000000521540642,
"max": 1.6723270535779495,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 108.55139808356762,
"min": -31.996801659464836,
"max": 160.54339714348316,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.057614636967053615,
"min": 0.04991505024408531,
"max": 8.629222057759762,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.3787124094960745,
"min": 4.04311906977091,
"max": 138.0675529241562,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1760237507",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/environment/miniconda3/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --force --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1760239409"
},
"total": 1902.4375437229999,
"count": 1,
"self": 0.4246169799996551,
"children": {
"run_training.setup": {
"total": 0.04659206099995572,
"count": 1,
"self": 0.04659206099995572
},
"TrainerController.start_learning": {
"total": 1901.9663346820003,
"count": 1,
"self": 0.9665804918834056,
"children": {
"TrainerController._reset_env": {
"total": 4.8143533979998665,
"count": 1,
"self": 4.8143533979998665
},
"TrainerController.advance": {
"total": 1896.116772749117,
"count": 63728,
"self": 0.9136694141129738,
"children": {
"env_step": {
"total": 1233.4892155770913,
"count": 63728,
"self": 1112.3540708930914,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.57224166997207,
"count": 63728,
"self": 3.502090220021273,
"children": {
"TorchPolicy.evaluate": {
"total": 117.0701514499508,
"count": 62578,
"self": 117.0701514499508
}
}
},
"workers": {
"total": 0.5629030140278246,
"count": 63728,
"self": 0.0,
"children": {
"worker_root": {
"total": 1899.2138721169583,
"count": 63728,
"is_parallel": true,
"self": 881.5417636209695,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004729730999997628,
"count": 1,
"is_parallel": true,
"self": 0.0013939090003987076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033358219995989202,
"count": 8,
"is_parallel": true,
"self": 0.0033358219995989202
}
}
},
"UnityEnvironment.step": {
"total": 0.0488378110001122,
"count": 1,
"is_parallel": true,
"self": 0.0006207230003383302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007245739998325007,
"count": 1,
"is_parallel": true,
"self": 0.0007245739998325007
},
"communicator.exchange": {
"total": 0.045769815000085146,
"count": 1,
"is_parallel": true,
"self": 0.045769815000085146
},
"steps_from_proto": {
"total": 0.0017226989998562203,
"count": 1,
"is_parallel": true,
"self": 0.00036063399988961464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013620649999666057,
"count": 8,
"is_parallel": true,
"self": 0.0013620649999666057
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1017.6721084959888,
"count": 63727,
"is_parallel": true,
"self": 23.625297090090044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 16.37866502396423,
"count": 63727,
"is_parallel": true,
"self": 16.37866502396423
},
"communicator.exchange": {
"total": 915.8770471409582,
"count": 63727,
"is_parallel": true,
"self": 915.8770471409582
},
"steps_from_proto": {
"total": 61.79109924097634,
"count": 63727,
"is_parallel": true,
"self": 13.113283899825092,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.677815341151245,
"count": 509816,
"is_parallel": true,
"self": 48.677815341151245
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 661.7138877579127,
"count": 63728,
"self": 1.9668749539428063,
"children": {
"process_trajectory": {
"total": 111.19098275696683,
"count": 63728,
"self": 111.00955135396703,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18143140299980587,
"count": 2,
"self": 0.18143140299980587
}
}
},
"_update_policy": {
"total": 548.5560300470031,
"count": 445,
"self": 264.2065940489824,
"children": {
"TorchPPOOptimizer.update": {
"total": 284.3494359980207,
"count": 22857,
"self": 284.3494359980207
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.800000952964183e-07,
"count": 1,
"self": 9.800000952964183e-07
},
"TrainerController._save_models": {
"total": 0.0686270629998944,
"count": 1,
"self": 0.001995081000131904,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0666319819997625,
"count": 1,
"self": 0.0666319819997625
}
}
}
}
}
}
}