{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38188955187797546,
"min": 0.38188955187797546,
"max": 1.501266598701477,
"count": 38
},
"Pyramids.Policy.Entropy.sum": {
"value": 11572.78125,
"min": 11572.78125,
"max": 45542.421875,
"count": 38
},
"Pyramids.Step.mean": {
"value": 1139999.0,
"min": 29950.0,
"max": 1139999.0,
"count": 38
},
"Pyramids.Step.sum": {
"value": 1139999.0,
"min": 29950.0,
"max": 1139999.0,
"count": 38
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6691520810127258,
"min": -0.11134287714958191,
"max": 0.755984902381897,
"count": 38
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 198.73817443847656,
"min": -26.833633422851562,
"max": 218.47962951660156,
"count": 38
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.002073489362373948,
"min": -0.04220078885555267,
"max": 0.3586871325969696,
"count": 38
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.6158263683319092,
"min": -11.014406204223633,
"max": 85.00885009765625,
"count": 38
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07053040664398316,
"min": 0.0661051647970453,
"max": 0.07343601807104085,
"count": 38
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9874256930157643,
"min": 0.506370608525079,
"max": 1.0320668164931703,
"count": 38
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01595725925396006,
"min": 0.0006119679733746128,
"max": 0.01595725925396006,
"count": 38
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22340162955544082,
"min": 0.007955583653869967,
"max": 0.22474448316315115,
"count": 38
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001874605732274405,
"min": 0.0001874605732274405,
"max": 0.0002984110005296667,
"count": 38
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.002624448025184167,
"min": 0.002088877003707667,
"max": 0.0038007931330689996,
"count": 38
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16248684523809526,
"min": 0.16248684523809526,
"max": 0.19947033333333336,
"count": 38
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.2748158333333337,
"min": 1.3962923333333335,
"max": 2.692464433333333,
"count": 38
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006252435839285714,
"min": 0.006252435839285714,
"max": 0.0099470863,
"count": 38
},
"Pyramids.Policy.Beta.sum": {
"value": 0.08753410175,
"min": 0.06962960409999999,
"max": 0.12670640689999998,
"count": 38
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008175112307071686,
"min": 0.007788532413542271,
"max": 0.35342660546302795,
"count": 38
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1144515722990036,
"min": 0.1095438003540039,
"max": 2.4739861488342285,
"count": 38
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 288.67289719626166,
"min": 264.08035714285717,
"max": 999.0,
"count": 38
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30888.0,
"min": 16621.0,
"max": 34094.0,
"count": 38
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6739345651085131,
"min": -1.0000000521540642,
"max": 1.7180606918409467,
"count": 38
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 179.1109984666109,
"min": -32.000001668930054,
"max": 192.42279748618603,
"count": 38
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6739345651085131,
"min": -1.0000000521540642,
"max": 1.7180606918409467,
"count": 38
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 179.1109984666109,
"min": -32.000001668930054,
"max": 192.42279748618603,
"count": 38
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02430717246081989,
"min": 0.02430717246081989,
"max": 6.620569071348975,
"count": 38
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6008674533077283,
"min": 2.2279771442554193,
"max": 112.54967421293259,
"count": 38
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1768338498",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1768341010"
},
"total": 2512.588613391,
"count": 1,
"self": 0.5052783890000683,
"children": {
"run_training.setup": {
"total": 0.022476386000107595,
"count": 1,
"self": 0.022476386000107595
},
"TrainerController.start_learning": {
"total": 2512.0608586159997,
"count": 1,
"self": 1.618787881015578,
"children": {
"TrainerController._reset_env": {
"total": 2.000398597000185,
"count": 1,
"self": 2.000398597000185
},
"TrainerController.advance": {
"total": 2508.263030581984,
"count": 73243,
"self": 1.6506406379749023,
"children": {
"env_step": {
"total": 1768.7173214021482,
"count": 73243,
"self": 1594.5715477281192,
"children": {
"SubprocessEnvManager._take_step": {
"total": 173.14291946004641,
"count": 73243,
"self": 5.264045809934942,
"children": {
"TorchPolicy.evaluate": {
"total": 167.87887365011147,
"count": 71641,
"self": 167.87887365011147
}
}
},
"workers": {
"total": 1.0028542139825731,
"count": 73242,
"self": 0.0,
"children": {
"worker_root": {
"total": 2504.750773728044,
"count": 73242,
"is_parallel": true,
"self": 1045.0704402040892,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016284260000247741,
"count": 1,
"is_parallel": true,
"self": 0.0004998580002393282,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011285679997854459,
"count": 8,
"is_parallel": true,
"self": 0.0011285679997854459
}
}
},
"UnityEnvironment.step": {
"total": 0.04967214399994191,
"count": 1,
"is_parallel": true,
"self": 0.0005460500001390756,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000509201000113535,
"count": 1,
"is_parallel": true,
"self": 0.000509201000113535
},
"communicator.exchange": {
"total": 0.04691357099954985,
"count": 1,
"is_parallel": true,
"self": 0.04691357099954985
},
"steps_from_proto": {
"total": 0.0017033220001394511,
"count": 1,
"is_parallel": true,
"self": 0.00034726899957604473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013560530005634064,
"count": 8,
"is_parallel": true,
"self": 0.0013560530005634064
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1459.6803335239547,
"count": 73241,
"is_parallel": true,
"self": 38.03736068719172,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.45456565491577,
"count": 73241,
"is_parallel": true,
"self": 26.45456565491577
},
"communicator.exchange": {
"total": 1274.436042397895,
"count": 73241,
"is_parallel": true,
"self": 1274.436042397895
},
"steps_from_proto": {
"total": 120.75236478395209,
"count": 73241,
"is_parallel": true,
"self": 25.485454056859453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 95.26691072709264,
"count": 585928,
"is_parallel": true,
"self": 95.26691072709264
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 737.8950685418608,
"count": 73242,
"self": 3.0947183007647254,
"children": {
"process_trajectory": {
"total": 139.074340333098,
"count": 73242,
"self": 138.8962951410981,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17804519199989954,
"count": 2,
"self": 0.17804519199989954
}
}
},
"_update_policy": {
"total": 595.7260099079981,
"count": 514,
"self": 332.53715679293373,
"children": {
"TorchPPOOptimizer.update": {
"total": 263.1888531150644,
"count": 26127,
"self": 263.1888531150644
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6100002540042624e-06,
"count": 1,
"self": 1.6100002540042624e-06
},
"TrainerController._save_models": {
"total": 0.1786399459997483,
"count": 1,
"self": 0.005350548999558669,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17328939700018964,
"count": 1,
"self": 0.17328939700018964
}
}
}
}
}
}
}