{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5357014536857605,
"min": 0.4885501265525818,
"max": 1.3672888278961182,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16105.3291015625,
"min": 14633.0537109375,
"max": 41478.07421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989953.0,
"min": 29952.0,
"max": 989953.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989953.0,
"min": 29952.0,
"max": 989953.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4859307110309601,
"min": -0.1279352754354477,
"max": 0.48969095945358276,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 129.74349975585938,
"min": -30.832401275634766,
"max": 131.91815185546875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.1239407062530518,
"min": -0.07054843753576279,
"max": 1.1239407062530518,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 300.0921630859375,
"min": -19.259723663330078,
"max": 300.0921630859375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06895625416267043,
"min": 0.06491572559960851,
"max": 0.077766173912679,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0343438124400564,
"min": 0.5111360454767268,
"max": 1.0504675143553566,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.14470961635104482,
"min": 0.0006092779717312957,
"max": 0.14470961635104482,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 2.1706442452656725,
"min": 0.006702057689044252,
"max": 2.1706442452656725,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.469717510126672e-06,
"min": 7.469717510126672e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011204576265190008,
"min": 0.00011204576265190008,
"max": 0.003382917272361,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248987333333334,
"min": 0.10248987333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373481000000002,
"min": 1.3886848,
"max": 2.527639,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025873834600000014,
"min": 0.00025873834600000014,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003881075190000002,
"min": 0.003881075190000002,
"max": 0.11279113609999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01273680105805397,
"min": 0.01273680105805397,
"max": 0.595973014831543,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19105201959609985,
"min": 0.1810784786939621,
"max": 4.171811103820801,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 388.9078947368421,
"min": 381.28205128205127,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29557.0,
"min": 15984.0,
"max": 32166.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5321262970958884,
"min": -1.0000000521540642,
"max": 1.5321262970958884,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 116.44159857928753,
"min": -31.99920167028904,
"max": 116.44159857928753,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5321262970958884,
"min": -1.0000000521540642,
"max": 1.5321262970958884,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 116.44159857928753,
"min": -31.99920167028904,
"max": 116.44159857928753,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05111637746219793,
"min": 0.05111637746219793,
"max": 11.873138459399343,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8848446871270426,
"min": 3.884203913505189,
"max": 189.97021535038948,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735365331",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735367522"
},
"total": 2190.8134054539996,
"count": 1,
"self": 0.5277503719999004,
"children": {
"run_training.setup": {
"total": 0.05583379699987745,
"count": 1,
"self": 0.05583379699987745
},
"TrainerController.start_learning": {
"total": 2190.229821285,
"count": 1,
"self": 1.3656889760213744,
"children": {
"TrainerController._reset_env": {
"total": 2.258067975999893,
"count": 1,
"self": 2.258067975999893
},
"TrainerController.advance": {
"total": 2186.5242351489787,
"count": 63578,
"self": 1.428900129941212,
"children": {
"env_step": {
"total": 1516.9090599969984,
"count": 63578,
"self": 1366.8656240940095,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.1971187909851,
"count": 63578,
"self": 4.548870795938456,
"children": {
"TorchPolicy.evaluate": {
"total": 144.64824799504663,
"count": 62558,
"self": 144.64824799504663
}
}
},
"workers": {
"total": 0.8463171120038169,
"count": 63578,
"self": 0.0,
"children": {
"worker_root": {
"total": 2185.2515501240364,
"count": 63578,
"is_parallel": true,
"self": 938.1058184230296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021518030000606814,
"count": 1,
"is_parallel": true,
"self": 0.0007019209997451981,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014498820003154833,
"count": 8,
"is_parallel": true,
"self": 0.0014498820003154833
}
}
},
"UnityEnvironment.step": {
"total": 0.04895375699993565,
"count": 1,
"is_parallel": true,
"self": 0.00211787600005664,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004745209998873179,
"count": 1,
"is_parallel": true,
"self": 0.0004745209998873179
},
"communicator.exchange": {
"total": 0.0447163090000231,
"count": 1,
"is_parallel": true,
"self": 0.0447163090000231
},
"steps_from_proto": {
"total": 0.0016450509999685892,
"count": 1,
"is_parallel": true,
"self": 0.0003625850001753861,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001282465999793203,
"count": 8,
"is_parallel": true,
"self": 0.001282465999793203
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1247.1457317010068,
"count": 63577,
"is_parallel": true,
"self": 33.262665535929955,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.671067043075027,
"count": 63577,
"is_parallel": true,
"self": 22.671067043075027
},
"communicator.exchange": {
"total": 1095.2457668710263,
"count": 63577,
"is_parallel": true,
"self": 1095.2457668710263
},
"steps_from_proto": {
"total": 95.96623225097551,
"count": 63577,
"is_parallel": true,
"self": 19.42191158590731,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.5443206650682,
"count": 508616,
"is_parallel": true,
"self": 76.5443206650682
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 668.1862750220394,
"count": 63578,
"self": 2.639867959978119,
"children": {
"process_trajectory": {
"total": 127.43863318106514,
"count": 63578,
"self": 127.19605569106534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24257748999980322,
"count": 2,
"self": 0.24257748999980322
}
}
},
"_update_policy": {
"total": 538.1077738809961,
"count": 448,
"self": 301.5329312819749,
"children": {
"TorchPPOOptimizer.update": {
"total": 236.57484259902117,
"count": 22812,
"self": 236.57484259902117
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3359999684325885e-06,
"count": 1,
"self": 1.3359999684325885e-06
},
"TrainerController._save_models": {
"total": 0.08182784800010268,
"count": 1,
"self": 0.0015451620001840638,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08028268599991861,
"count": 1,
"self": 0.08028268599991861
}
}
}
}
}
}
}