{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6509416103363037,
"min": 0.6509416103363037,
"max": 1.4029514789581299,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19444.927734375,
"min": 19444.927734375,
"max": 42559.9375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3457738161087036,
"min": -0.2010049670934677,
"max": 0.4050307869911194,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 90.24696350097656,
"min": -47.63817596435547,
"max": 106.92813110351562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 135.45318603515625,
"min": -0.0005455068894661963,
"max": 135.45318603515625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 35353.28125,
"min": -0.13746774196624756,
"max": 35353.28125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07116542290618998,
"min": 0.06388296890197255,
"max": 0.07385484702164331,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9963159206866596,
"min": 0.4817108204762247,
"max": 1.0532231060913582,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 1851.6963029361907,
"min": 0.00024384026576801846,
"max": 1851.6963029361907,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 25923.74824110667,
"min": 0.0025457051088406558,
"max": 25923.74824110667,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.300640423628573e-06,
"min": 7.300640423628573e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010220896593080002,
"min": 0.00010220896593080002,
"max": 0.0030219080926974003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243351428571427,
"min": 0.10243351428571427,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340692,
"min": 1.3886848,
"max": 2.4009790000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025310807714285725,
"min": 0.00025310807714285725,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035435130800000017,
"min": 0.0035435130800000017,
"max": 0.10075952973999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00783002283424139,
"min": 0.00782970804721117,
"max": 0.35662126541137695,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10962032526731491,
"min": 0.10961591452360153,
"max": 2.4963488578796387,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 480.453125,
"min": 432.96923076923076,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30749.0,
"min": 15984.0,
"max": 32905.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3007280931342393,
"min": -1.0000000521540642,
"max": 1.5362522881764633,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 83.24659796059132,
"min": -32.000001668930054,
"max": 99.85639873147011,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3007280931342393,
"min": -1.0000000521540642,
"max": 1.5362522881764633,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 83.24659796059132,
"min": -32.000001668930054,
"max": 99.85639873147011,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04010859162281122,
"min": 0.03515081175047761,
"max": 7.064080120995641,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.566949863859918,
"min": 2.2848027637810446,
"max": 113.02528193593025,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710786859",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=/content/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1710788925"
},
"total": 2065.710465668,
"count": 1,
"self": 0.5936395920007271,
"children": {
"run_training.setup": {
"total": 0.10453902899962486,
"count": 1,
"self": 0.10453902899962486
},
"TrainerController.start_learning": {
"total": 2065.0122870469995,
"count": 1,
"self": 1.2720890359605619,
"children": {
"TrainerController._reset_env": {
"total": 4.141118183000344,
"count": 1,
"self": 4.141118183000344
},
"TrainerController.advance": {
"total": 2059.508407389039,
"count": 63335,
"self": 1.3779396110348898,
"children": {
"env_step": {
"total": 1401.782214859019,
"count": 63335,
"self": 1273.0833209249695,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.91015102903384,
"count": 63335,
"self": 4.6484490040134006,
"children": {
"TorchPolicy.evaluate": {
"total": 123.26170202502044,
"count": 62557,
"self": 123.26170202502044
}
}
},
"workers": {
"total": 0.7887429050156243,
"count": 63335,
"self": 0.0,
"children": {
"worker_root": {
"total": 2060.2796150749655,
"count": 63335,
"is_parallel": true,
"self": 896.9964837560178,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005344448999949236,
"count": 1,
"is_parallel": true,
"self": 0.003886063999743783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014583850002054533,
"count": 8,
"is_parallel": true,
"self": 0.0014583850002054533
}
}
},
"UnityEnvironment.step": {
"total": 0.05039602599981663,
"count": 1,
"is_parallel": true,
"self": 0.000611895000020013,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004999789998691995,
"count": 1,
"is_parallel": true,
"self": 0.0004999789998691995
},
"communicator.exchange": {
"total": 0.04751114500004405,
"count": 1,
"is_parallel": true,
"self": 0.04751114500004405
},
"steps_from_proto": {
"total": 0.0017730069998833642,
"count": 1,
"is_parallel": true,
"self": 0.00037849700083825155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013945099990451126,
"count": 8,
"is_parallel": true,
"self": 0.0013945099990451126
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1163.2831313189477,
"count": 63334,
"is_parallel": true,
"self": 34.29297426590665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.028847002978637,
"count": 63334,
"is_parallel": true,
"self": 24.028847002978637
},
"communicator.exchange": {
"total": 998.5090422270491,
"count": 63334,
"is_parallel": true,
"self": 998.5090422270491
},
"steps_from_proto": {
"total": 106.45226782301324,
"count": 63334,
"is_parallel": true,
"self": 20.45399349096897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.99827433204428,
"count": 506672,
"is_parallel": true,
"self": 85.99827433204428
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 656.348252918985,
"count": 63335,
"self": 2.3225651520169777,
"children": {
"process_trajectory": {
"total": 120.01191298296544,
"count": 63335,
"self": 119.74777153796458,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2641414450008597,
"count": 2,
"self": 0.2641414450008597
}
}
},
"_update_policy": {
"total": 534.0137747840026,
"count": 436,
"self": 331.4164921870338,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.59728259696885,
"count": 22869,
"self": 202.59728259696885
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.899996828404255e-07,
"count": 1,
"self": 8.899996828404255e-07
},
"TrainerController._save_models": {
"total": 0.09067154900003516,
"count": 1,
"self": 0.0015128670002013678,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08915868199983379,
"count": 1,
"self": 0.08915868199983379
}
}
}
}
}
}
}