{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3852304518222809,
"min": 0.3852304518222809,
"max": 1.4606144428253174,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11575.404296875,
"min": 11575.404296875,
"max": 44309.19921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29952.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29952.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5869263410568237,
"min": -0.08284033089876175,
"max": 0.6566648483276367,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 169.6217041015625,
"min": -19.964519500732422,
"max": 189.11947631835938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.013054704293608665,
"min": -0.04121285304427147,
"max": 0.2007797360420227,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.7728095054626465,
"min": -10.50927734375,
"max": 48.387916564941406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06841490736006366,
"min": 0.06553651568248635,
"max": 0.07437448282281708,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9578087030408913,
"min": 0.49738821889716733,
"max": 1.0601679395573833,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017237641335719693,
"min": 0.00100124348898982,
"max": 0.017237641335719693,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24132697870007572,
"min": 0.014017408845857478,
"max": 0.24132697870007572,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.620033174307142e-06,
"min": 7.620033174307142e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010668046444029998,
"min": 0.00010668046444029998,
"max": 0.0036331414889529006,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253997857142856,
"min": 0.10253997857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355596999999998,
"min": 1.3886848,
"max": 2.6110471000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026374385928571424,
"min": 0.00026374385928571424,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036924140299999996,
"min": 0.0036924140299999996,
"max": 0.12112360529,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00859097670763731,
"min": 0.00859097670763731,
"max": 0.3061388432979584,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12027367949485779,
"min": 0.12027367949485779,
"max": 2.142971992492676,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 295.2970297029703,
"min": 295.2970297029703,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29825.0,
"min": 15984.0,
"max": 33239.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.645279192393369,
"min": -1.0000000521540642,
"max": 1.645279192393369,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 166.17319843173027,
"min": -28.33660177886486,
"max": 170.0087983161211,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.645279192393369,
"min": -1.0000000521540642,
"max": 1.645279192393369,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 166.17319843173027,
"min": -28.33660177886486,
"max": 170.0087983161211,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026370829146826237,
"min": 0.026370829146826237,
"max": 5.724514028057456,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.66345374382945,
"min": 2.5514876203378662,
"max": 91.5922244489193,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754746609",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1754748857"
},
"total": 2247.657674489,
"count": 1,
"self": 0.5364791360002528,
"children": {
"run_training.setup": {
"total": 0.028191187999709655,
"count": 1,
"self": 0.028191187999709655
},
"TrainerController.start_learning": {
"total": 2247.093004165,
"count": 1,
"self": 1.3494095689247843,
"children": {
"TrainerController._reset_env": {
"total": 2.41113845100017,
"count": 1,
"self": 2.41113845100017
},
"TrainerController.advance": {
"total": 2243.2554503740753,
"count": 64096,
"self": 1.372382891057896,
"children": {
"env_step": {
"total": 1591.0616079739766,
"count": 64096,
"self": 1443.7362951998698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.5024546640534,
"count": 64096,
"self": 4.53101906701886,
"children": {
"TorchPolicy.evaluate": {
"total": 141.97143559703454,
"count": 62557,
"self": 141.97143559703454
}
}
},
"workers": {
"total": 0.8228581100534029,
"count": 64096,
"self": 0.0,
"children": {
"worker_root": {
"total": 2242.055981248899,
"count": 64096,
"is_parallel": true,
"self": 910.311038662905,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027050159997088485,
"count": 1,
"is_parallel": true,
"self": 0.0008549729991500499,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018500430005587987,
"count": 8,
"is_parallel": true,
"self": 0.0018500430005587987
}
}
},
"UnityEnvironment.step": {
"total": 0.05600612899979751,
"count": 1,
"is_parallel": true,
"self": 0.0005214049997448456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005033670004195301,
"count": 1,
"is_parallel": true,
"self": 0.0005033670004195301
},
"communicator.exchange": {
"total": 0.053402462999656564,
"count": 1,
"is_parallel": true,
"self": 0.053402462999656564
},
"steps_from_proto": {
"total": 0.0015788939999765716,
"count": 1,
"is_parallel": true,
"self": 0.0003272650005783362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012516289993982355,
"count": 8,
"is_parallel": true,
"self": 0.0012516289993982355
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1331.744942585994,
"count": 64095,
"is_parallel": true,
"self": 31.381076652019146,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.22864351604494,
"count": 64095,
"is_parallel": true,
"self": 22.22864351604494
},
"communicator.exchange": {
"total": 1183.1714314359547,
"count": 64095,
"is_parallel": true,
"self": 1183.1714314359547
},
"steps_from_proto": {
"total": 94.96379098197531,
"count": 64095,
"is_parallel": true,
"self": 19.17478716692176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.78900381505355,
"count": 512760,
"is_parallel": true,
"self": 75.78900381505355
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.8214595090408,
"count": 64096,
"self": 2.5597475231288627,
"children": {
"process_trajectory": {
"total": 125.56474509291138,
"count": 64096,
"self": 125.36387383691226,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20087125599911815,
"count": 2,
"self": 0.20087125599911815
}
}
},
"_update_policy": {
"total": 522.6969668930005,
"count": 457,
"self": 289.9328444800517,
"children": {
"TorchPPOOptimizer.update": {
"total": 232.76412241294884,
"count": 22800,
"self": 232.76412241294884
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.859997251420282e-07,
"count": 1,
"self": 8.859997251420282e-07
},
"TrainerController._save_models": {
"total": 0.07700488499995117,
"count": 1,
"self": 0.0012432080002326984,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07576167699971847,
"count": 1,
"self": 0.07576167699971847
}
}
}
}
}
}
}