{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6076599955558777,
"min": 0.5923882722854614,
"max": 1.4416825771331787,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18317.302734375,
"min": 17800.08203125,
"max": 43734.8828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19271090626716614,
"min": -0.09983684867620468,
"max": 0.19271090626716614,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 49.14128112792969,
"min": -24.060680389404297,
"max": 49.14128112792969,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.011703865602612495,
"min": -0.011703865602612495,
"max": 0.461391806602478,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.984485626220703,
"min": -2.984485626220703,
"max": 109.34986114501953,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06882973944909906,
"min": 0.06428515679355913,
"max": 0.0725272628219178,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9636163522873868,
"min": 0.49143051600064314,
"max": 1.0407014043303209,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010227514696087842,
"min": 0.0004155907063713054,
"max": 0.010227514696087842,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14318520574522978,
"min": 0.004987088476455665,
"max": 0.14318520574522978,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.73212599408572e-06,
"min": 7.73212599408572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010824976391720009,
"min": 0.00010824976391720009,
"max": 0.0035071160309613995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257734285714284,
"min": 0.10257734285714284,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360827999999999,
"min": 1.3886848,
"max": 2.5690386000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026747655142857164,
"min": 0.00026747655142857164,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003744671720000003,
"min": 0.003744671720000003,
"max": 0.11692695614000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012460383586585522,
"min": 0.012460383586585522,
"max": 0.45620039105415344,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17444537580013275,
"min": 0.17444537580013275,
"max": 3.1934027671813965,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 613.44,
"min": 613.44,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30672.0,
"min": 15984.0,
"max": 33903.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.906371957808733,
"min": -1.0000000521540642,
"max": 0.906371957808733,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 45.31859789043665,
"min": -30.997401610016823,
"max": 45.31859789043665,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.906371957808733,
"min": -1.0000000521540642,
"max": 0.906371957808733,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 45.31859789043665,
"min": -30.997401610016823,
"max": 45.31859789043665,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08027245839359239,
"min": 0.08027245839359239,
"max": 9.600501103326678,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.013622919679619,
"min": 3.954603912192397,
"max": 153.60801765322685,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741775156",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741777255"
},
"total": 2098.765049498,
"count": 1,
"self": 0.47583024199957435,
"children": {
"run_training.setup": {
"total": 0.019627852999974493,
"count": 1,
"self": 0.019627852999974493
},
"TrainerController.start_learning": {
"total": 2098.269591403,
"count": 1,
"self": 1.4140341899328632,
"children": {
"TrainerController._reset_env": {
"total": 2.086692922999873,
"count": 1,
"self": 2.086692922999873
},
"TrainerController.advance": {
"total": 2094.6693355450675,
"count": 63237,
"self": 1.4236951751172455,
"children": {
"env_step": {
"total": 1409.9440794779837,
"count": 63237,
"self": 1252.5133927619988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.6518227599911,
"count": 63237,
"self": 4.750612389946355,
"children": {
"TorchPolicy.evaluate": {
"total": 151.90121037004474,
"count": 62548,
"self": 151.90121037004474
}
}
},
"workers": {
"total": 0.7788639559937565,
"count": 63237,
"self": 0.0,
"children": {
"worker_root": {
"total": 2093.2051340440585,
"count": 63237,
"is_parallel": true,
"self": 953.9215038749646,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020757789998242515,
"count": 1,
"is_parallel": true,
"self": 0.0007017570001153217,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013740219997089298,
"count": 8,
"is_parallel": true,
"self": 0.0013740219997089298
}
}
},
"UnityEnvironment.step": {
"total": 0.04972740899984274,
"count": 1,
"is_parallel": true,
"self": 0.0005311619997883099,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042652000001908164,
"count": 1,
"is_parallel": true,
"self": 0.00042652000001908164
},
"communicator.exchange": {
"total": 0.04708492700001443,
"count": 1,
"is_parallel": true,
"self": 0.04708492700001443
},
"steps_from_proto": {
"total": 0.001684800000020914,
"count": 1,
"is_parallel": true,
"self": 0.00036008499978379405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00132471500023712,
"count": 8,
"is_parallel": true,
"self": 0.00132471500023712
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1139.2836301690938,
"count": 63236,
"is_parallel": true,
"self": 31.777482157082204,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.79776340301919,
"count": 63236,
"is_parallel": true,
"self": 23.79776340301919
},
"communicator.exchange": {
"total": 985.0672462029918,
"count": 63236,
"is_parallel": true,
"self": 985.0672462029918
},
"steps_from_proto": {
"total": 98.64113840600066,
"count": 63236,
"is_parallel": true,
"self": 19.955688728202404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.68544967779826,
"count": 505888,
"is_parallel": true,
"self": 78.68544967779826
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 683.3015608919666,
"count": 63237,
"self": 2.5237451669374877,
"children": {
"process_trajectory": {
"total": 128.80714049303242,
"count": 63237,
"self": 128.60319254903243,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20394794399999228,
"count": 2,
"self": 0.20394794399999228
}
}
},
"_update_policy": {
"total": 551.9706752319967,
"count": 446,
"self": 306.394864768052,
"children": {
"TorchPPOOptimizer.update": {
"total": 245.57581046394466,
"count": 22812,
"self": 245.57581046394466
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.819997674436308e-07,
"count": 1,
"self": 8.819997674436308e-07
},
"TrainerController._save_models": {
"total": 0.09952786299982108,
"count": 1,
"self": 0.001698620999832201,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09782924199998888,
"count": 1,
"self": 0.09782924199998888
}
}
}
}
}
}
}