{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.15423423051834106,
"min": 0.13721418380737305,
"max": 1.4190685749053955,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4604.8173828125,
"min": 4147.16162109375,
"max": 43048.86328125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999929.0,
"min": 29952.0,
"max": 2999929.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999929.0,
"min": 29952.0,
"max": 2999929.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8467289805412292,
"min": -0.09302370250225067,
"max": 0.8801586627960205,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 261.6392517089844,
"min": -22.418712615966797,
"max": 276.36981201171875,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010497023351490498,
"min": -0.02791176736354828,
"max": 0.3400300443172455,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2435801029205322,
"min": -8.429353713989258,
"max": 81.60720825195312,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06999989494327927,
"min": 0.0641818592363658,
"max": 0.07345304454238863,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.049998424149189,
"min": 0.46964243210781226,
"max": 1.0675909318767176,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01472319920699103,
"min": 0.0008196608255127934,
"max": 0.017320428911175224,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22084798810486544,
"min": 0.008545185434281167,
"max": 0.24248600475645316,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4898195034266677e-06,
"min": 1.4898195034266677e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2347292551400014e-05,
"min": 2.2347292551400014e-05,
"max": 0.0039692403769198996,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049657333333335,
"min": 0.10049657333333335,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5074486000000002,
"min": 1.3897045333333333,
"max": 2.7674971333333334,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.960767600000005e-05,
"min": 5.960767600000005e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008941151400000008,
"min": 0.0008941151400000008,
"max": 0.13231570199,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005716682877391577,
"min": 0.0053168656304478645,
"max": 0.4777553975582123,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08575024455785751,
"min": 0.07443612068891525,
"max": 3.344287872314453,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 211.8041958041958,
"min": 204.30666666666667,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30288.0,
"min": 15984.0,
"max": 34468.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.760042244384826,
"min": -1.0000000521540642,
"max": 1.7920507165616837,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 249.9259987026453,
"min": -32.000001668930054,
"max": 269.2289977297187,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.760042244384826,
"min": -1.0000000521540642,
"max": 1.7920507165616837,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 249.9259987026453,
"min": -32.000001668930054,
"max": 269.2289977297187,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01261298624315163,
"min": 0.012211193999422849,
"max": 9.655847731977701,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7910440465275315,
"min": 1.611877607923816,
"max": 154.49356371164322,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675782570",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675789816"
},
"total": 7246.024776210001,
"count": 1,
"self": 0.4841035190020193,
"children": {
"run_training.setup": {
"total": 0.10747879599989574,
"count": 1,
"self": 0.10747879599989574
},
"TrainerController.start_learning": {
"total": 7245.433193894999,
"count": 1,
"self": 3.919313242845419,
"children": {
"TrainerController._reset_env": {
"total": 6.676386037000157,
"count": 1,
"self": 6.676386037000157
},
"TrainerController.advance": {
"total": 7234.751813337153,
"count": 195155,
"self": 3.9572483882739107,
"children": {
"env_step": {
"total": 5055.713049628909,
"count": 195155,
"self": 4734.15716575288,
"children": {
"SubprocessEnvManager._take_step": {
"total": 319.15854826902455,
"count": 195155,
"self": 13.473830749114086,
"children": {
"TorchPolicy.evaluate": {
"total": 305.68471751991046,
"count": 187549,
"self": 103.04553354683549,
"children": {
"TorchPolicy.sample_actions": {
"total": 202.63918397307498,
"count": 187549,
"self": 202.63918397307498
}
}
}
}
},
"workers": {
"total": 2.3973356070039245,
"count": 195155,
"self": 0.0,
"children": {
"worker_root": {
"total": 7230.1895236420205,
"count": 195155,
"is_parallel": true,
"self": 2829.549793344958,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002082131999941339,
"count": 1,
"is_parallel": true,
"self": 0.0007750979998490948,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013070340000922442,
"count": 8,
"is_parallel": true,
"self": 0.0013070340000922442
}
}
},
"UnityEnvironment.step": {
"total": 0.06898873499994806,
"count": 1,
"is_parallel": true,
"self": 0.0005562840001402947,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045164199991631904,
"count": 1,
"is_parallel": true,
"self": 0.00045164199991631904
},
"communicator.exchange": {
"total": 0.06632968999997502,
"count": 1,
"is_parallel": true,
"self": 0.06632968999997502
},
"steps_from_proto": {
"total": 0.0016511189999164344,
"count": 1,
"is_parallel": true,
"self": 0.0004155549997904018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012355640001260326,
"count": 8,
"is_parallel": true,
"self": 0.0012355640001260326
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4400.639730297063,
"count": 195154,
"is_parallel": true,
"self": 93.98043170281562,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 68.30831622605137,
"count": 195154,
"is_parallel": true,
"self": 68.30831622605137
},
"communicator.exchange": {
"total": 3939.3213168581424,
"count": 195154,
"is_parallel": true,
"self": 3939.3213168581424
},
"steps_from_proto": {
"total": 299.0296655100533,
"count": 195154,
"is_parallel": true,
"self": 65.37637468827165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 233.65329082178164,
"count": 1561232,
"is_parallel": true,
"self": 233.65329082178164
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2175.08151531997,
"count": 195155,
"self": 7.726188629730132,
"children": {
"process_trajectory": {
"total": 479.6649972402504,
"count": 195155,
"self": 479.06662691624865,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5983703240017348,
"count": 6,
"self": 0.5983703240017348
}
}
},
"_update_policy": {
"total": 1687.6903294499893,
"count": 1395,
"self": 648.8642857736272,
"children": {
"TorchPPOOptimizer.update": {
"total": 1038.8260436763621,
"count": 68406,
"self": 1038.8260436763621
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.499991963617504e-07,
"count": 1,
"self": 8.499991963617504e-07
},
"TrainerController._save_models": {
"total": 0.08568042800106923,
"count": 1,
"self": 0.001437590000932687,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08424283800013654,
"count": 1,
"self": 0.08424283800013654
}
}
}
}
}
}
}