{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4641489088535309,
"min": 0.4641489088535309,
"max": 0.5225774049758911,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 14043.2890625,
"min": 10588.298828125,
"max": 15794.37890625,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 460.21875,
"min": 460.21875,
"max": 539.9827586206897,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29454.0,
"min": 15115.0,
"max": 31319.0,
"count": 7
},
"Pyramids.Step.mean": {
"value": 1199969.0,
"min": 1019974.0,
"max": 1199969.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 1199969.0,
"min": 1019974.0,
"max": 1199969.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36032506823539734,
"min": 0.2538290023803711,
"max": 0.36895352602005005,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 95.48614501953125,
"min": 42.13561248779297,
"max": 96.66582489013672,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.6892226934432983,
"min": -0.6892226934432983,
"max": 0.8987314701080322,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -182.64401245117188,
"min": -182.64401245117188,
"max": 235.4676513671875,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3834874798776582,
"min": 1.253062042439806,
"max": 1.407818726496771,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 88.54319871217012,
"min": 40.882999032735825,
"max": 90.10039849579334,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3834874798776582,
"min": 1.253062042439806,
"max": 1.407818726496771,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 88.54319871217012,
"min": 40.882999032735825,
"max": 90.10039849579334,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.6397047238133382,
"min": 0.6397047238133382,
"max": 0.7813150221171479,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 40.941102324053645,
"min": 23.439450663514435,
"max": 43.4844670034945,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06577221771433307,
"min": 0.06530051335686862,
"max": 0.07252948782832844,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.920811048000663,
"min": 0.6442582946115231,
"max": 1.0879423174249265,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.10291241172013714,
"min": 0.02121116742439975,
"max": 0.10291241172013714,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 1.44077376408192,
"min": 0.19090050681959775,
"max": 1.44077376408192,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.6392952155059478e-06,
"min": 3.6392952155059478e-06,
"max": 4.730763978637037e-05,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.095013301708327e-05,
"min": 5.095013301708327e-05,
"max": 0.0005762992079006666,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10121306547619047,
"min": 0.10121306547619047,
"max": 0.1157691851851852,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4169829166666665,
"min": 1.0419226666666668,
"max": 1.5920993333333333,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00013118524107142842,
"min": 0.00013118524107142842,
"max": 0.0015853416,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.001836593374999998,
"min": 0.001836593374999998,
"max": 0.0193307234,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013314624316990376,
"min": 0.012862170115113258,
"max": 0.01502944901585579,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1864047348499298,
"min": 0.1352650374174118,
"max": 0.2015685737133026,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682173649",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682174221"
},
"total": 571.6703061529997,
"count": 1,
"self": 0.9984784549992582,
"children": {
"run_training.setup": {
"total": 0.1252933630003099,
"count": 1,
"self": 0.1252933630003099
},
"TrainerController.start_learning": {
"total": 570.5465343350002,
"count": 1,
"self": 0.43370117899394245,
"children": {
"TrainerController._reset_env": {
"total": 4.351538560000336,
"count": 1,
"self": 4.351538560000336
},
"TrainerController.advance": {
"total": 565.5476142300072,
"count": 12868,
"self": 0.46691790904242225,
"children": {
"env_step": {
"total": 425.5422484500168,
"count": 12868,
"self": 395.82554607704105,
"children": {
"SubprocessEnvManager._take_step": {
"total": 29.452552143015055,
"count": 12868,
"self": 1.252491377041224,
"children": {
"TorchPolicy.evaluate": {
"total": 28.20006076597383,
"count": 12563,
"self": 28.20006076597383
}
}
},
"workers": {
"total": 0.2641502299607055,
"count": 12868,
"self": 0.0,
"children": {
"worker_root": {
"total": 568.9347690650502,
"count": 12868,
"is_parallel": true,
"self": 204.0601599120414,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020555050000439223,
"count": 1,
"is_parallel": true,
"self": 0.0006246969996936969,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014308080003502255,
"count": 8,
"is_parallel": true,
"self": 0.0014308080003502255
}
}
},
"UnityEnvironment.step": {
"total": 0.052971607000017684,
"count": 1,
"is_parallel": true,
"self": 0.0006087649994697131,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005684940001629002,
"count": 1,
"is_parallel": true,
"self": 0.0005684940001629002
},
"communicator.exchange": {
"total": 0.04984381900021617,
"count": 1,
"is_parallel": true,
"self": 0.04984381900021617
},
"steps_from_proto": {
"total": 0.001950529000168899,
"count": 1,
"is_parallel": true,
"self": 0.0004596290000336012,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014909000001352979,
"count": 8,
"is_parallel": true,
"self": 0.0014909000001352979
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 364.8746091530088,
"count": 12867,
"is_parallel": true,
"self": 8.189302013027827,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.69794912699399,
"count": 12867,
"is_parallel": true,
"self": 5.69794912699399
},
"communicator.exchange": {
"total": 327.05352850702275,
"count": 12867,
"is_parallel": true,
"self": 327.05352850702275
},
"steps_from_proto": {
"total": 23.93382950596424,
"count": 12867,
"is_parallel": true,
"self": 5.484773770170705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 18.449055735793536,
"count": 102936,
"is_parallel": true,
"self": 18.449055735793536
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 139.53844787094795,
"count": 12868,
"self": 0.8017819739684455,
"children": {
"process_trajectory": {
"total": 24.607402939977874,
"count": 12868,
"self": 24.607402939977874
},
"_update_policy": {
"total": 114.12926295700163,
"count": 94,
"self": 72.6441576090192,
"children": {
"TorchPPOOptimizer.update": {
"total": 41.485105347982426,
"count": 4530,
"self": 41.485105347982426
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.384999450237956e-06,
"count": 1,
"self": 1.384999450237956e-06
},
"TrainerController._save_models": {
"total": 0.21367898099924787,
"count": 1,
"self": 0.004406408999784617,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20927257199946325,
"count": 1,
"self": 0.20927257199946325
}
}
}
}
}
}
}