{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4630821645259857,
"min": 0.4449754059314728,
"max": 1.4241944551467896,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13929.51171875,
"min": 13363.5009765625,
"max": 43204.36328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989906.0,
"min": 29984.0,
"max": 989906.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989906.0,
"min": 29984.0,
"max": 989906.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.45861712098121643,
"min": -0.11178482323884964,
"max": 0.48593631386756897,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 122.45076751708984,
"min": -26.828357696533203,
"max": 132.66061401367188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.022885648533701897,
"min": -0.03231687471270561,
"max": 0.4256077706813812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.110467910766602,
"min": -8.143852233886719,
"max": 101.29464721679688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06653981398336502,
"min": 0.06403459008323527,
"max": 0.07266814800079438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9315573957671102,
"min": 0.5570226457893805,
"max": 1.0547737967572175,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013986490499519277,
"min": 0.00011095229723620667,
"max": 0.014451769975203818,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19581086699326988,
"min": 0.0014423798640706867,
"max": 0.205165641662247,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.416226099385714e-06,
"min": 7.416226099385714e-06,
"max": 0.0002948508017164,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001038271653914,
"min": 0.0001038271653914,
"max": 0.0035070938309687996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247204285714284,
"min": 0.10247204285714284,
"max": 0.1982836,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346085999999998,
"min": 1.4346085999999998,
"max": 2.5690312000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025695708142857145,
"min": 0.00025695708142857145,
"max": 0.00982853164,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00359739914,
"min": 0.00359739914,
"max": 0.11692621688000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011457763612270355,
"min": 0.011457763612270355,
"max": 0.46690621972084045,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16040869057178497,
"min": 0.16040869057178497,
"max": 3.7352497577667236,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 397.61538461538464,
"min": 364.53846153846155,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31014.0,
"min": 16399.0,
"max": 32595.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4228358718638237,
"min": -0.9999467191596826,
"max": 1.5599063081266005,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 110.98119800537825,
"min": -31.995601668953896,
"max": 123.23259834200144,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4228358718638237,
"min": -0.9999467191596826,
"max": 1.5599063081266005,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 110.98119800537825,
"min": -31.995601668953896,
"max": 123.23259834200144,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04722479881363539,
"min": 0.045106792224562264,
"max": 9.69014005801257,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6835343074635603,
"min": 3.563436585740419,
"max": 164.73238098621368,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708043229",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708045431"
},
"total": 2202.5841689069994,
"count": 1,
"self": 0.5311004349996438,
"children": {
"run_training.setup": {
"total": 0.04493276599987439,
"count": 1,
"self": 0.04493276599987439
},
"TrainerController.start_learning": {
"total": 2202.008135706,
"count": 1,
"self": 1.340281121983935,
"children": {
"TrainerController._reset_env": {
"total": 2.448347540999748,
"count": 1,
"self": 2.448347540999748
},
"TrainerController.advance": {
"total": 2198.1249183000164,
"count": 63598,
"self": 1.5376492427667472,
"children": {
"env_step": {
"total": 1568.6628836021537,
"count": 63598,
"self": 1436.737800378025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.0438932149791,
"count": 63598,
"self": 4.899333277910046,
"children": {
"TorchPolicy.evaluate": {
"total": 126.14455993706906,
"count": 62557,
"self": 126.14455993706906
}
}
},
"workers": {
"total": 0.8811900091495772,
"count": 63598,
"self": 0.0,
"children": {
"worker_root": {
"total": 2196.5665968208696,
"count": 63598,
"is_parallel": true,
"self": 877.7200548708579,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021645189999617287,
"count": 1,
"is_parallel": true,
"self": 0.0006774930016035796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001487025998358149,
"count": 8,
"is_parallel": true,
"self": 0.001487025998358149
}
}
},
"UnityEnvironment.step": {
"total": 0.048102089000167325,
"count": 1,
"is_parallel": true,
"self": 0.0005663790007019998,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004766409992953413,
"count": 1,
"is_parallel": true,
"self": 0.0004766409992953413
},
"communicator.exchange": {
"total": 0.045416153000587656,
"count": 1,
"is_parallel": true,
"self": 0.045416153000587656
},
"steps_from_proto": {
"total": 0.0016429159995823284,
"count": 1,
"is_parallel": true,
"self": 0.00035210000078222947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001290815998800099,
"count": 8,
"is_parallel": true,
"self": 0.001290815998800099
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1318.8465419500117,
"count": 63597,
"is_parallel": true,
"self": 35.06795853771473,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.84871911508253,
"count": 63597,
"is_parallel": true,
"self": 24.84871911508253
},
"communicator.exchange": {
"total": 1160.0065789049868,
"count": 63597,
"is_parallel": true,
"self": 1160.0065789049868
},
"steps_from_proto": {
"total": 98.92328539222763,
"count": 63597,
"is_parallel": true,
"self": 19.6580982419473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.26518715028033,
"count": 508776,
"is_parallel": true,
"self": 79.26518715028033
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 627.924385455096,
"count": 63598,
"self": 2.710339611176096,
"children": {
"process_trajectory": {
"total": 125.8383804749219,
"count": 63598,
"self": 125.59499526292257,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24338521199933894,
"count": 2,
"self": 0.24338521199933894
}
}
},
"_update_policy": {
"total": 499.37566536899794,
"count": 452,
"self": 294.8808876969788,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.49477767201915,
"count": 22791,
"self": 204.49477767201915
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5810001059435308e-06,
"count": 1,
"self": 1.5810001059435308e-06
},
"TrainerController._save_models": {
"total": 0.09458716199969786,
"count": 1,
"self": 0.00149247299941635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0930946890002815,
"count": 1,
"self": 0.0930946890002815
}
}
}
}
}
}
}