{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5645940899848938,
"min": 0.5482778549194336,
"max": 1.4056267738342285,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16811.353515625,
"min": 16439.5625,
"max": 42641.09375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.20708060264587402,
"min": -0.1451137214899063,
"max": 0.20708060264587402,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 52.18431091308594,
"min": -34.39195251464844,
"max": 52.18431091308594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01593734323978424,
"min": 0.009690994396805763,
"max": 0.4687096178531647,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.016210556030273,
"min": 2.364602565765381,
"max": 111.08418273925781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06811609017476505,
"min": 0.0651717334667303,
"max": 0.07265960076970923,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9536252624467108,
"min": 0.49858642950063253,
"max": 1.033571569069275,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010213584932903906,
"min": 0.0004199197209500351,
"max": 0.012866814248548184,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1429901890606547,
"min": 0.005878876093300491,
"max": 0.1429901890606547,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3181689892142914e-06,
"min": 7.3181689892142914e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010245436584900008,
"min": 0.00010245436584900008,
"max": 0.0036332344889219,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243935714285714,
"min": 0.10243935714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434151,
"min": 1.3886848,
"max": 2.6110781000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025369177857142874,
"min": 0.00025369177857142874,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035516849000000023,
"min": 0.0035516849000000023,
"max": 0.12112670218999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010833946987986565,
"min": 0.010833946987986565,
"max": 0.5612369179725647,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1516752541065216,
"min": 0.1516752541065216,
"max": 3.9286582469940186,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 602.6739130434783,
"min": 602.6739130434783,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27723.0,
"min": 15984.0,
"max": 32382.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9624347521559052,
"min": -1.0000000521540642,
"max": 0.9624347521559052,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 44.27199859917164,
"min": -29.92320165038109,
"max": 44.27199859917164,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9624347521559052,
"min": -1.0000000521540642,
"max": 0.9624347521559052,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 44.27199859917164,
"min": -29.92320165038109,
"max": 44.27199859917164,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06757615297101438,
"min": 0.06757615297101438,
"max": 11.078034261241555,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1085030366666615,
"min": 3.1085030366666615,
"max": 177.24854817986488,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690912414",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690914862"
},
"total": 2448.0045822539996,
"count": 1,
"self": 0.5329758419993595,
"children": {
"run_training.setup": {
"total": 0.04473490500004118,
"count": 1,
"self": 0.04473490500004118
},
"TrainerController.start_learning": {
"total": 2447.426871507,
"count": 1,
"self": 1.7264599349746277,
"children": {
"TrainerController._reset_env": {
"total": 4.283175711000013,
"count": 1,
"self": 4.283175711000013
},
"TrainerController.advance": {
"total": 2441.3130483020254,
"count": 63332,
"self": 1.740284125960443,
"children": {
"env_step": {
"total": 1726.2592841460066,
"count": 63332,
"self": 1597.8914779960708,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.35790321600052,
"count": 63332,
"self": 5.3354360339258164,
"children": {
"TorchPolicy.evaluate": {
"total": 122.0224671820747,
"count": 62543,
"self": 122.0224671820747
}
}
},
"workers": {
"total": 1.0099029339353365,
"count": 63332,
"self": 0.0,
"children": {
"worker_root": {
"total": 2441.0890882810663,
"count": 63332,
"is_parallel": true,
"self": 976.6385176611116,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001765175999935309,
"count": 1,
"is_parallel": true,
"self": 0.0005408940000961593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012242819998391496,
"count": 8,
"is_parallel": true,
"self": 0.0012242819998391496
}
}
},
"UnityEnvironment.step": {
"total": 0.056533634000061284,
"count": 1,
"is_parallel": true,
"self": 0.0006635429999732878,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00054025500003263,
"count": 1,
"is_parallel": true,
"self": 0.00054025500003263
},
"communicator.exchange": {
"total": 0.05040350499984925,
"count": 1,
"is_parallel": true,
"self": 0.05040350499984925
},
"steps_from_proto": {
"total": 0.004926331000206119,
"count": 1,
"is_parallel": true,
"self": 0.0004031979999581381,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004523133000247981,
"count": 8,
"is_parallel": true,
"self": 0.004523133000247981
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1464.4505706199548,
"count": 63331,
"is_parallel": true,
"self": 38.21173909091158,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.302247351994538,
"count": 63331,
"is_parallel": true,
"self": 25.302247351994538
},
"communicator.exchange": {
"total": 1284.2000780330206,
"count": 63331,
"is_parallel": true,
"self": 1284.2000780330206
},
"steps_from_proto": {
"total": 116.736506144028,
"count": 63331,
"is_parallel": true,
"self": 23.43237216205921,
"children": {
"_process_rank_one_or_two_observation": {
"total": 93.30413398196879,
"count": 506648,
"is_parallel": true,
"self": 93.30413398196879
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 713.3134800300581,
"count": 63332,
"self": 3.2347949600077754,
"children": {
"process_trajectory": {
"total": 118.86785004104354,
"count": 63332,
"self": 118.63169165604381,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23615838499972597,
"count": 2,
"self": 0.23615838499972597
}
}
},
"_update_policy": {
"total": 591.2108350290068,
"count": 452,
"self": 383.39349333001337,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.81734169899346,
"count": 22815,
"self": 207.81734169899346
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.035999957821332e-06,
"count": 1,
"self": 1.035999957821332e-06
},
"TrainerController._save_models": {
"total": 0.10418652299995301,
"count": 1,
"self": 0.0016979619999801798,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10248856099997283,
"count": 1,
"self": 0.10248856099997283
}
}
}
}
}
}
}