{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39670315384864807,
"min": 0.3710021674633026,
"max": 1.3738209009170532,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11951.873046875,
"min": 11046.9609375,
"max": 41676.23046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989971.0,
"min": 29951.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989971.0,
"min": 29951.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19580592215061188,
"min": -0.1260053664445877,
"max": 0.3786279559135437,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 49.930511474609375,
"min": -30.367294311523438,
"max": 101.85092163085938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.004774597939103842,
"min": 0.004774597939103842,
"max": 0.4807899296283722,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.2175225019454956,
"min": 1.2175225019454956,
"max": 114.4280014038086,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06752379538236362,
"min": 0.06467358830640742,
"max": 0.07500651230887631,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9453331353530908,
"min": 0.6000520984710105,
"max": 1.05151145994508,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010019604268607164,
"min": 0.00015780335281042406,
"max": 0.014062588768865682,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1402744597605003,
"min": 0.002051443586535513,
"max": 0.19769684173433255,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.705490288678571e-06,
"min": 7.705490288678571e-06,
"max": 0.00029485196421601253,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001078768640415,
"min": 0.0001078768640415,
"max": 0.0032248507250497993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025684642857143,
"min": 0.1025684642857143,
"max": 0.1982839875,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359585000000001,
"min": 1.4359585000000001,
"max": 2.3596798,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002665895821428571,
"min": 0.0002665895821428571,
"max": 0.00982857035125,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00373225415,
"min": 0.00373225415,
"max": 0.10750752498,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01354020182043314,
"min": 0.01354020182043314,
"max": 0.6089368462562561,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1895628273487091,
"min": 0.1895628273487091,
"max": 4.871494770050049,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 620.1153846153846,
"min": 472.93939393939394,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32246.0,
"min": 17133.0,
"max": 32515.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7258115045439738,
"min": -0.9999742455059483,
"max": 1.4361242182327039,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 37.74219823628664,
"min": -31.998401656746864,
"max": 94.78419840335846,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7258115045439738,
"min": -0.9999742455059483,
"max": 1.4361242182327039,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 37.74219823628664,
"min": -31.998401656746864,
"max": 94.78419840335846,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08697961024993744,
"min": 0.07042404547400745,
"max": 12.552832635740439,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.522939732996747,
"min": 3.8147338173585013,
"max": 225.9509874433279,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710671974",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710674195"
},
"total": 2220.620351557,
"count": 1,
"self": 0.5459717220001039,
"children": {
"run_training.setup": {
"total": 0.07791086099996392,
"count": 1,
"self": 0.07791086099996392
},
"TrainerController.start_learning": {
"total": 2219.996468974,
"count": 1,
"self": 1.7184975650020533,
"children": {
"TrainerController._reset_env": {
"total": 3.352092254000013,
"count": 1,
"self": 3.352092254000013
},
"TrainerController.advance": {
"total": 2214.8348800359972,
"count": 63371,
"self": 1.7002305159676325,
"children": {
"env_step": {
"total": 1569.4469260570363,
"count": 63371,
"self": 1417.160669216967,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.27155189804182,
"count": 63371,
"self": 5.2855932830403844,
"children": {
"TorchPolicy.evaluate": {
"total": 145.98595861500144,
"count": 62561,
"self": 145.98595861500144
}
}
},
"workers": {
"total": 1.0147049420274925,
"count": 63371,
"self": 0.0,
"children": {
"worker_root": {
"total": 2214.179492142014,
"count": 63371,
"is_parallel": true,
"self": 934.2224572160001,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051505840000345415,
"count": 1,
"is_parallel": true,
"self": 0.0037182970000912974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014322869999432442,
"count": 8,
"is_parallel": true,
"self": 0.0014322869999432442
}
}
},
"UnityEnvironment.step": {
"total": 0.05089801500002977,
"count": 1,
"is_parallel": true,
"self": 0.0006711720001248977,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000534691999973802,
"count": 1,
"is_parallel": true,
"self": 0.000534691999973802
},
"communicator.exchange": {
"total": 0.04786215999990873,
"count": 1,
"is_parallel": true,
"self": 0.04786215999990873
},
"steps_from_proto": {
"total": 0.0018299910000223463,
"count": 1,
"is_parallel": true,
"self": 0.0005225840002367477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013074069997855986,
"count": 8,
"is_parallel": true,
"self": 0.0013074069997855986
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1279.957034926014,
"count": 63370,
"is_parallel": true,
"self": 37.82000195206433,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.715343572977645,
"count": 63370,
"is_parallel": true,
"self": 25.715343572977645
},
"communicator.exchange": {
"total": 1108.1623188659698,
"count": 63370,
"is_parallel": true,
"self": 1108.1623188659698
},
"steps_from_proto": {
"total": 108.25937053500229,
"count": 63370,
"is_parallel": true,
"self": 23.000426335195584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.2589441998067,
"count": 506960,
"is_parallel": true,
"self": 85.2589441998067
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 643.6877234629931,
"count": 63371,
"self": 3.158512999986101,
"children": {
"process_trajectory": {
"total": 133.08469435200243,
"count": 63371,
"self": 132.81982599400237,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2648683580000579,
"count": 2,
"self": 0.2648683580000579
}
}
},
"_update_policy": {
"total": 507.44451611100453,
"count": 444,
"self": 298.8913423840279,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.55317372697664,
"count": 22785,
"self": 208.55317372697664
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.600003068044316e-07,
"count": 1,
"self": 9.600003068044316e-07
},
"TrainerController._save_models": {
"total": 0.09099815900026442,
"count": 1,
"self": 0.00187262500048746,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08912553399977696,
"count": 1,
"self": 0.08912553399977696
}
}
}
}
}
}
}