{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3356103003025055,
"min": 0.318541020154953,
"max": 1.489730715751648,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10095.158203125,
"min": 9591.9072265625,
"max": 45192.47265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989899.0,
"min": 29952.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989899.0,
"min": 29952.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5575686693191528,
"min": -0.08570696413516998,
"max": 0.5934262275695801,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 155.56166076660156,
"min": -20.826791763305664,
"max": 164.8709716796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012937402352690697,
"min": -0.006819415371865034,
"max": 0.2574804723262787,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.6095352172851562,
"min": -1.7253121137619019,
"max": 61.02286911010742,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848087570390661,
"min": 0.06550784249333809,
"max": 0.07301270654813075,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9587322598546926,
"min": 0.4885081889863185,
"max": 1.0951905982219612,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01678876793095992,
"min": 0.000547822611816244,
"max": 0.01678876793095992,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23504275103343888,
"min": 0.007121693953611172,
"max": 0.23504275103343888,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2632904360785725e-06,
"min": 7.2632904360785725e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010168606610510002,
"min": 0.00010168606610510002,
"max": 0.0036323359892213997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024210642857143,
"min": 0.1024210642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338949000000003,
"min": 1.3886848,
"max": 2.6107786,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002518643221428572,
"min": 0.0002518643221428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035261005100000005,
"min": 0.0035261005100000005,
"max": 0.12109678214,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01031291764229536,
"min": 0.00995531678199768,
"max": 0.341031014919281,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1443808525800705,
"min": 0.13937443494796753,
"max": 2.3872170448303223,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 334.1914893617021,
"min": 303.3076923076923,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31414.0,
"min": 15984.0,
"max": 33018.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6433806273565497,
"min": -1.0000000521540642,
"max": 1.6747098663351039,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 152.83439834415913,
"min": -29.9826016202569,
"max": 152.83439834415913,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6433806273565497,
"min": -1.0000000521540642,
"max": 1.6747098663351039,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 152.83439834415913,
"min": -29.9826016202569,
"max": 152.83439834415913,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03517450252766921,
"min": 0.033297422097977966,
"max": 6.639530652202666,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2712287350732367,
"min": 3.030065410915995,
"max": 106.23249043524265,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739154978",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739157336"
},
"total": 2357.6876556689995,
"count": 1,
"self": 0.4798981599997205,
"children": {
"run_training.setup": {
"total": 0.02027819099998851,
"count": 1,
"self": 0.02027819099998851
},
"TrainerController.start_learning": {
"total": 2357.1874793179995,
"count": 1,
"self": 1.6822531850093583,
"children": {
"TrainerController._reset_env": {
"total": 2.8494313700000475,
"count": 1,
"self": 2.8494313700000475
},
"TrainerController.advance": {
"total": 2352.5675210409904,
"count": 63821,
"self": 1.7160677449560353,
"children": {
"env_step": {
"total": 1639.7456933269937,
"count": 63821,
"self": 1463.6536997379349,
"children": {
"SubprocessEnvManager._take_step": {
"total": 175.0754872990451,
"count": 63821,
"self": 5.082564697064299,
"children": {
"TorchPolicy.evaluate": {
"total": 169.9929226019808,
"count": 62555,
"self": 169.9929226019808
}
}
},
"workers": {
"total": 1.0165062900136945,
"count": 63821,
"self": 0.0,
"children": {
"worker_root": {
"total": 2351.253634867991,
"count": 63821,
"is_parallel": true,
"self": 1014.1786310259845,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002411087999917072,
"count": 1,
"is_parallel": true,
"self": 0.0006742040000062843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017368839999107877,
"count": 8,
"is_parallel": true,
"self": 0.0017368839999107877
}
}
},
"UnityEnvironment.step": {
"total": 0.061958047999951305,
"count": 1,
"is_parallel": true,
"self": 0.0005168610000509943,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044780000007449416,
"count": 1,
"is_parallel": true,
"self": 0.00044780000007449416
},
"communicator.exchange": {
"total": 0.05944776499995896,
"count": 1,
"is_parallel": true,
"self": 0.05944776499995896
},
"steps_from_proto": {
"total": 0.001545621999866853,
"count": 1,
"is_parallel": true,
"self": 0.0003294009998171532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012162210000496998,
"count": 8,
"is_parallel": true,
"self": 0.0012162210000496998
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1337.0750038420065,
"count": 63820,
"is_parallel": true,
"self": 33.186493161977296,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.80788342995993,
"count": 63820,
"is_parallel": true,
"self": 23.80788342995993
},
"communicator.exchange": {
"total": 1181.7735680420344,
"count": 63820,
"is_parallel": true,
"self": 1181.7735680420344
},
"steps_from_proto": {
"total": 98.30705920803484,
"count": 63820,
"is_parallel": true,
"self": 20.54629551314997,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.76076369488487,
"count": 510560,
"is_parallel": true,
"self": 77.76076369488487
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 711.1057599690407,
"count": 63821,
"self": 3.2450057089811253,
"children": {
"process_trajectory": {
"total": 133.39100635805562,
"count": 63821,
"self": 133.16552715405578,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22547920399983923,
"count": 2,
"self": 0.22547920399983923
}
}
},
"_update_policy": {
"total": 574.469747902004,
"count": 454,
"self": 316.4153069460058,
"children": {
"TorchPPOOptimizer.update": {
"total": 258.0544409559982,
"count": 22761,
"self": 258.0544409559982
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.570000318286475e-07,
"count": 1,
"self": 8.570000318286475e-07
},
"TrainerController._save_models": {
"total": 0.08827286499990805,
"count": 1,
"self": 0.0012894559999949706,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08698340899991308,
"count": 1,
"self": 0.08698340899991308
}
}
}
}
}
}
}