{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4988373816013336,
"min": 0.4988373816013336,
"max": 1.400163173675537,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15076.861328125,
"min": 15076.861328125,
"max": 42475.3515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.18904902040958405,
"min": -0.09859872609376907,
"max": 0.27158740162849426,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 47.829402923583984,
"min": -23.762292861938477,
"max": 70.34114074707031,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.24346713721752167,
"min": -0.04391523078083992,
"max": 0.8556417226791382,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 61.59718704223633,
"min": -10.97880744934082,
"max": 202.78709411621094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06989871290349255,
"min": 0.0663985261275056,
"max": 0.0734289055469979,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9785819806488957,
"min": 0.5128724369738618,
"max": 1.0799716497142573,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014699819003151851,
"min": 0.000657679229681786,
"max": 0.05228406076121141,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2057974660441259,
"min": 0.009207509215545003,
"max": 0.36598842532847986,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.445411803942856e-06,
"min": 7.445411803942856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010423576525519998,
"min": 0.00010423576525519998,
"max": 0.0035078408307197997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248177142857144,
"min": 0.10248177142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347448000000003,
"min": 1.3691136000000002,
"max": 2.5692801999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025792896571428576,
"min": 0.00025792896571428576,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036110055200000006,
"min": 0.0036110055200000006,
"max": 0.11695109198,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01851927675306797,
"min": 0.017776142805814743,
"max": 1.096476435661316,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2592698633670807,
"min": 0.248866006731987,
"max": 7.675334930419922,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 636.7234042553191,
"min": 526.2745098039215,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29926.0,
"min": 15984.0,
"max": 32668.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8099063489031284,
"min": -1.0000000521540642,
"max": 1.2775882101234268,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 38.06559839844704,
"min": -32.000001668930054,
"max": 65.15699871629477,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8099063489031284,
"min": -1.0000000521540642,
"max": 1.2775882101234268,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 38.06559839844704,
"min": -32.000001668930054,
"max": 65.15699871629477,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1208576082123166,
"min": 0.09623786309898338,
"max": 24.668059619143605,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.6803075859788805,
"min": 4.908131018048152,
"max": 394.6889539062977,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749450453",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749452860"
},
"total": 2407.500947005,
"count": 1,
"self": 0.3725431739999294,
"children": {
"run_training.setup": {
"total": 0.022115040000244335,
"count": 1,
"self": 0.022115040000244335
},
"TrainerController.start_learning": {
"total": 2407.1062887909998,
"count": 1,
"self": 1.2638026009867644,
"children": {
"TrainerController._reset_env": {
"total": 2.1798572789998616,
"count": 1,
"self": 2.1798572789998616
},
"TrainerController.advance": {
"total": 2403.609847858014,
"count": 63363,
"self": 1.2046005291790607,
"children": {
"env_step": {
"total": 1824.8829998849324,
"count": 63363,
"self": 1718.3380242809653,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.8416403970241,
"count": 63363,
"self": 4.562331916999938,
"children": {
"TorchPolicy.evaluate": {
"total": 101.27930848002416,
"count": 62572,
"self": 101.27930848002416
}
}
},
"workers": {
"total": 0.7033352069429384,
"count": 63363,
"self": 0.0,
"children": {
"worker_root": {
"total": 2404.0349790329656,
"count": 63363,
"is_parallel": true,
"self": 798.8835096939429,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019007729997611023,
"count": 1,
"is_parallel": true,
"self": 0.0006705049995616719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012302680001994304,
"count": 8,
"is_parallel": true,
"self": 0.0012302680001994304
}
}
},
"UnityEnvironment.step": {
"total": 0.03639379500009454,
"count": 1,
"is_parallel": true,
"self": 0.000335704999997688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003250939998906688,
"count": 1,
"is_parallel": true,
"self": 0.0003250939998906688
},
"communicator.exchange": {
"total": 0.034857528999964416,
"count": 1,
"is_parallel": true,
"self": 0.034857528999964416
},
"steps_from_proto": {
"total": 0.0008754670002417697,
"count": 1,
"is_parallel": true,
"self": 0.000208287000987184,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006671799992545857,
"count": 8,
"is_parallel": true,
"self": 0.0006671799992545857
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1605.1514693390227,
"count": 63362,
"is_parallel": true,
"self": 22.728871250064913,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.822803457100235,
"count": 63362,
"is_parallel": true,
"self": 12.822803457100235
},
"communicator.exchange": {
"total": 1485.7013662350005,
"count": 63362,
"is_parallel": true,
"self": 1485.7013662350005
},
"steps_from_proto": {
"total": 83.89842839685707,
"count": 63362,
"is_parallel": true,
"self": 18.41694223995728,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.48148615689979,
"count": 506896,
"is_parallel": true,
"self": 65.48148615689979
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 577.5222474439024,
"count": 63363,
"self": 2.4476308698958746,
"children": {
"process_trajectory": {
"total": 119.6922890950118,
"count": 63363,
"self": 119.53991497901188,
"children": {
"RLTrainer._checkpoint": {
"total": 0.152374115999919,
"count": 2,
"self": 0.152374115999919
}
}
},
"_update_policy": {
"total": 455.38232747899474,
"count": 447,
"self": 240.0689395170548,
"children": {
"TorchPPOOptimizer.update": {
"total": 215.31338796193995,
"count": 22797,
"self": 215.31338796193995
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0009998732130043e-06,
"count": 1,
"self": 1.0009998732130043e-06
},
"TrainerController._save_models": {
"total": 0.05278005199943436,
"count": 1,
"self": 0.0012748109993481194,
"children": {
"RLTrainer._checkpoint": {
"total": 0.051505241000086244,
"count": 1,
"self": 0.051505241000086244
}
}
}
}
}
}
}