{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5649408102035522,
"min": 0.5649408102035522,
"max": 1.4391257762908936,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17147.083984375,
"min": 16958.62890625,
"max": 43657.3203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5076766014099121,
"min": -0.11307421326637268,
"max": 0.5076766014099121,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 136.56500244140625,
"min": -27.250885009765625,
"max": 136.56500244140625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026886429637670517,
"min": 0.0009475810220465064,
"max": 0.31611743569374084,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.232449531555176,
"min": 0.23405250906944275,
"max": 75.8681869506836,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06779815149234816,
"min": 0.0654063078725899,
"max": 0.07387026159867781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0169722723852224,
"min": 0.49134000898629887,
"max": 1.0686679257099363,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01561053839234066,
"min": 0.000283631573367646,
"max": 0.01561053839234066,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2341580758851099,
"min": 0.002552684160308814,
"max": 0.2341580758851099,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.485257504946669e-06,
"min": 7.485257504946669e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011227886257420003,
"min": 0.00011227886257420003,
"max": 0.0032598134133955997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249505333333335,
"min": 0.10249505333333335,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374258000000003,
"min": 1.3691136000000002,
"max": 2.5724558,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002592558280000001,
"min": 0.0002592558280000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038888374200000014,
"min": 0.0038888374200000014,
"max": 0.10868177955999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01688181422650814,
"min": 0.01688181422650814,
"max": 0.46513041853904724,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.25322720408439636,
"min": 0.2372875213623047,
"max": 3.255913019180298,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 386.7837837837838,
"min": 369.4868421052632,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28622.0,
"min": 15984.0,
"max": 34018.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5643466462691624,
"min": -1.0000000521540642,
"max": 1.5643466462691624,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 117.32599847018719,
"min": -32.000001668930054,
"max": 117.32599847018719,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5643466462691624,
"min": -1.0000000521540642,
"max": 1.5643466462691624,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 117.32599847018719,
"min": -32.000001668930054,
"max": 117.32599847018719,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0669891503887872,
"min": 0.0669891503887872,
"max": 8.443654282949865,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.024186279159039,
"min": 4.94648015871644,
"max": 135.09846852719784,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692539021",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692541280"
},
"total": 2259.0034658860004,
"count": 1,
"self": 0.9836250880007356,
"children": {
"run_training.setup": {
"total": 0.058786668999800895,
"count": 1,
"self": 0.058786668999800895
},
"TrainerController.start_learning": {
"total": 2257.961054129,
"count": 1,
"self": 1.474427534026745,
"children": {
"TrainerController._reset_env": {
"total": 5.277173040999969,
"count": 1,
"self": 5.277173040999969
},
"TrainerController.advance": {
"total": 2251.0491052279726,
"count": 63664,
"self": 1.5256891040321534,
"children": {
"env_step": {
"total": 1574.016178442008,
"count": 63664,
"self": 1455.8156397638263,
"children": {
"SubprocessEnvManager._take_step": {
"total": 117.2816544140137,
"count": 63664,
"self": 5.102869102038312,
"children": {
"TorchPolicy.evaluate": {
"total": 112.17878531197539,
"count": 62569,
"self": 112.17878531197539
}
}
},
"workers": {
"total": 0.9188842641679003,
"count": 63664,
"self": 0.0,
"children": {
"worker_root": {
"total": 2252.528248673018,
"count": 63664,
"is_parallel": true,
"self": 919.7874099870792,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00277695600016159,
"count": 1,
"is_parallel": true,
"self": 0.0007873200006542902,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019896359995073,
"count": 8,
"is_parallel": true,
"self": 0.0019896359995073
}
}
},
"UnityEnvironment.step": {
"total": 0.05048362099978476,
"count": 1,
"is_parallel": true,
"self": 0.000576331000047503,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005758739998782403,
"count": 1,
"is_parallel": true,
"self": 0.0005758739998782403
},
"communicator.exchange": {
"total": 0.04728439700011222,
"count": 1,
"is_parallel": true,
"self": 0.04728439700011222
},
"steps_from_proto": {
"total": 0.002047018999746797,
"count": 1,
"is_parallel": true,
"self": 0.0004721800000879739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001574838999658823,
"count": 8,
"is_parallel": true,
"self": 0.001574838999658823
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1332.7408386859388,
"count": 63663,
"is_parallel": true,
"self": 35.410814856993056,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.017191696967075,
"count": 63663,
"is_parallel": true,
"self": 25.017191696967075
},
"communicator.exchange": {
"total": 1157.3996859501435,
"count": 63663,
"is_parallel": true,
"self": 1157.3996859501435
},
"steps_from_proto": {
"total": 114.91314618183515,
"count": 63663,
"is_parallel": true,
"self": 22.91913610110805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.9940100807271,
"count": 509304,
"is_parallel": true,
"self": 91.9940100807271
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 675.5072376819326,
"count": 63664,
"self": 2.7458800709337083,
"children": {
"process_trajectory": {
"total": 119.90400565199343,
"count": 63664,
"self": 119.5599529749934,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34405267700003606,
"count": 2,
"self": 0.34405267700003606
}
}
},
"_update_policy": {
"total": 552.8573519590054,
"count": 442,
"self": 359.8213949060546,
"children": {
"TorchPPOOptimizer.update": {
"total": 193.03595705295083,
"count": 22818,
"self": 193.03595705295083
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4899997040629387e-06,
"count": 1,
"self": 1.4899997040629387e-06
},
"TrainerController._save_models": {
"total": 0.16034683600082644,
"count": 1,
"self": 0.0020878020013697096,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15825903399945673,
"count": 1,
"self": 0.15825903399945673
}
}
}
}
}
}
}