{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.49508771300315857,
"min": 0.49298739433288574,
"max": 1.4071691036224365,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14868.474609375,
"min": 14805.3974609375,
"max": 42687.8828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29944.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29944.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4322894513607025,
"min": -0.1104528158903122,
"max": 0.5000013113021851,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 115.42127990722656,
"min": -26.508676528930664,
"max": 137.5003662109375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.10147782415151596,
"min": 0.0042106048204004765,
"max": 0.3912934958934784,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 27.094579696655273,
"min": 1.1410739421844482,
"max": 92.73655700683594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0662458893629567,
"min": 0.06399601611979853,
"max": 0.07523108304122648,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9936883404443505,
"min": 0.564539189677682,
"max": 1.0660755243939897,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016314810548081166,
"min": 0.0002776166952106739,
"max": 0.017409976197239364,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24472215822121748,
"min": 0.0036090170377387613,
"max": 0.24472215822121748,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.546457484546669e-06,
"min": 7.546457484546669e-06,
"max": 0.0002949948016684,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011319686226820004,
"min": 0.00011319686226820004,
"max": 0.0032602292132569996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251545333333331,
"min": 0.10251545333333331,
"max": 0.1983316,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377317999999998,
"min": 1.4778339,
"max": 2.4008372000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026129378800000017,
"min": 0.00026129378800000017,
"max": 0.00983332684,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003919406820000002,
"min": 0.003919406820000002,
"max": 0.10869562569999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014030850492417812,
"min": 0.012926673516631126,
"max": 0.4298626780509949,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2104627639055252,
"min": 0.18097342550754547,
"max": 3.438901424407959,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 427.3918918918919,
"min": 382.1578947368421,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31627.0,
"min": 16231.0,
"max": 34051.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4104026846990392,
"min": -0.9999613425424022,
"max": 1.538868407963922,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.3697986677289,
"min": -31.997601687908173,
"max": 121.17899835854769,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4104026846990392,
"min": -0.9999613425424022,
"max": 1.538868407963922,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.3697986677289,
"min": -31.997601687908173,
"max": 121.17899835854769,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06219701037263951,
"min": 0.05611792643321678,
"max": 8.689668038312126,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.602578767575324,
"min": 3.8106529234792106,
"max": 147.72435665130615,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702623352",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702625519"
},
"total": 2166.391708021,
"count": 1,
"self": 0.5475583479997113,
"children": {
"run_training.setup": {
"total": 0.06453233500008082,
"count": 1,
"self": 0.06453233500008082
},
"TrainerController.start_learning": {
"total": 2165.779617338,
"count": 1,
"self": 1.8584046470004978,
"children": {
"TrainerController._reset_env": {
"total": 3.153803168999957,
"count": 1,
"self": 3.153803168999957
},
"TrainerController.advance": {
"total": 2160.706517292,
"count": 63509,
"self": 1.818642827974145,
"children": {
"env_step": {
"total": 1470.2949372399812,
"count": 63509,
"self": 1351.187205338113,
"children": {
"SubprocessEnvManager._take_step": {
"total": 117.97948108894047,
"count": 63509,
"self": 4.62787908292853,
"children": {
"TorchPolicy.evaluate": {
"total": 113.35160200601194,
"count": 62555,
"self": 113.35160200601194
}
}
},
"workers": {
"total": 1.1282508129276039,
"count": 63509,
"self": 0.0,
"children": {
"worker_root": {
"total": 2161.760912915041,
"count": 63509,
"is_parallel": true,
"self": 933.0783685820745,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00375284900007955,
"count": 1,
"is_parallel": true,
"self": 0.002691407000043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010614420000365499,
"count": 8,
"is_parallel": true,
"self": 0.0010614420000365499
}
}
},
"UnityEnvironment.step": {
"total": 0.045700837000026695,
"count": 1,
"is_parallel": true,
"self": 0.00051075999999739,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041839000004983973,
"count": 1,
"is_parallel": true,
"self": 0.00041839000004983973
},
"communicator.exchange": {
"total": 0.04339444699996875,
"count": 1,
"is_parallel": true,
"self": 0.04339444699996875
},
"steps_from_proto": {
"total": 0.0013772400000107154,
"count": 1,
"is_parallel": true,
"self": 0.0002977600000804159,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010794799999302995,
"count": 8,
"is_parallel": true,
"self": 0.0010794799999302995
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1228.6825443329665,
"count": 63508,
"is_parallel": true,
"self": 34.668921604032676,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.639458781966255,
"count": 63508,
"is_parallel": true,
"self": 18.639458781966255
},
"communicator.exchange": {
"total": 1081.0193433160025,
"count": 63508,
"is_parallel": true,
"self": 1081.0193433160025
},
"steps_from_proto": {
"total": 94.35482063096492,
"count": 63508,
"is_parallel": true,
"self": 19.933651313971495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.42116931699343,
"count": 508064,
"is_parallel": true,
"self": 74.42116931699343
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 688.5929372240449,
"count": 63509,
"self": 3.5779278690484944,
"children": {
"process_trajectory": {
"total": 114.35246892299926,
"count": 63509,
"self": 114.16868193099924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18378699200002302,
"count": 2,
"self": 0.18378699200002302
}
}
},
"_update_policy": {
"total": 570.6625404319972,
"count": 446,
"self": 232.49422359198832,
"children": {
"TorchPPOOptimizer.update": {
"total": 338.16831684000886,
"count": 22809,
"self": 338.16831684000886
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.88999841461191e-07,
"count": 1,
"self": 7.88999841461191e-07
},
"TrainerController._save_models": {
"total": 0.06089144099996702,
"count": 1,
"self": 0.0016667810000399186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0592246599999271,
"count": 1,
"self": 0.0592246599999271
}
}
}
}
}
}
}