{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7225052714347839,
"min": 0.7173699736595154,
"max": 1.4328051805496216,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 21732.958984375,
"min": 21509.62109375,
"max": 43465.578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989942.0,
"min": 29972.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989942.0,
"min": 29972.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.39470812678337097,
"min": -0.1072504073381424,
"max": 0.4815601408481598,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 105.38706970214844,
"min": -25.740097045898438,
"max": 130.021240234375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00941851269453764,
"min": -0.0017757037421688437,
"max": 0.4064343273639679,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.514742851257324,
"min": -0.47056150436401367,
"max": 96.73136901855469,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06433017812794008,
"min": 0.06433017812794008,
"max": 0.07368988681849004,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9006224937911611,
"min": 0.6523103487429837,
"max": 1.059398212673841,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014395221496497034,
"min": 0.0005385811441333376,
"max": 0.014395221496497034,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20153310095095847,
"min": 0.00700155487373339,
"max": 0.20153310095095847,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.528811776142853e-06,
"min": 7.528811776142853e-06,
"max": 0.00029487000171,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010540336486599994,
"min": 0.00010540336486599994,
"max": 0.0036079722973426,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250957142857142,
"min": 0.10250957142857142,
"max": 0.19829000000000002,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.435134,
"min": 1.435134,
"max": 2.5697579,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002607061857142856,
"min": 0.0002607061857142856,
"max": 0.009829171000000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036498865999999987,
"min": 0.0036498865999999987,
"max": 0.12027547425999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009803004562854767,
"min": 0.009803004562854767,
"max": 0.44495153427124023,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13724206387996674,
"min": 0.13724206387996674,
"max": 4.004563808441162,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 432.6056338028169,
"min": 408.28767123287673,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30715.0,
"min": 17538.0,
"max": 33373.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4546872991281496,
"min": -0.9999067192276319,
"max": 1.4546872991281496,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 103.28279823809862,
"min": -29.997201576828957,
"max": 106.12239834666252,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4546872991281496,
"min": -0.9999067192276319,
"max": 1.4546872991281496,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 103.28279823809862,
"min": -29.997201576828957,
"max": 106.12239834666252,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04372790544523372,
"min": 0.041855747703768646,
"max": 8.908532566494411,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1046812866115943,
"min": 2.8043350961524993,
"max": 160.3535861968994,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674760173",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674762039"
},
"total": 1865.3107823379999,
"count": 1,
"self": 0.4300490250000166,
"children": {
"run_training.setup": {
"total": 0.09980248900001243,
"count": 1,
"self": 0.09980248900001243
},
"TrainerController.start_learning": {
"total": 1864.7809308239998,
"count": 1,
"self": 1.0424040970258375,
"children": {
"TrainerController._reset_env": {
"total": 5.989989850000029,
"count": 1,
"self": 5.989989850000029
},
"TrainerController.advance": {
"total": 1857.6652969999736,
"count": 63578,
"self": 1.0902688489861703,
"children": {
"env_step": {
"total": 1245.8556656499838,
"count": 63578,
"self": 1150.5021807419557,
"children": {
"SubprocessEnvManager._take_step": {
"total": 94.68440885101245,
"count": 63578,
"self": 3.975563891034426,
"children": {
"TorchPolicy.evaluate": {
"total": 90.70884495997802,
"count": 62548,
"self": 30.852047241987975,
"children": {
"TorchPolicy.sample_actions": {
"total": 59.85679771799005,
"count": 62548,
"self": 59.85679771799005
}
}
}
}
},
"workers": {
"total": 0.6690760570156726,
"count": 63578,
"self": 0.0,
"children": {
"worker_root": {
"total": 1862.1578476419613,
"count": 63578,
"is_parallel": true,
"self": 800.106172716961,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017903620000652154,
"count": 1,
"is_parallel": true,
"self": 0.0006699140005821391,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011204479994830763,
"count": 8,
"is_parallel": true,
"self": 0.0011204479994830763
}
}
},
"UnityEnvironment.step": {
"total": 0.04283061999990423,
"count": 1,
"is_parallel": true,
"self": 0.0004614689999016264,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004026550000162388,
"count": 1,
"is_parallel": true,
"self": 0.0004026550000162388
},
"communicator.exchange": {
"total": 0.04044613899986871,
"count": 1,
"is_parallel": true,
"self": 0.04044613899986871
},
"steps_from_proto": {
"total": 0.0015203570001176558,
"count": 1,
"is_parallel": true,
"self": 0.0003946690003431286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011256879997745273,
"count": 8,
"is_parallel": true,
"self": 0.0011256879997745273
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1062.0516749250003,
"count": 63577,
"is_parallel": true,
"self": 26.17037994203497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.94434531902789,
"count": 63577,
"is_parallel": true,
"self": 20.94434531902789
},
"communicator.exchange": {
"total": 928.8634380610088,
"count": 63577,
"is_parallel": true,
"self": 928.8634380610088
},
"steps_from_proto": {
"total": 86.07351160292865,
"count": 63577,
"is_parallel": true,
"self": 20.020317011880024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.05319459104862,
"count": 508616,
"is_parallel": true,
"self": 66.05319459104862
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 610.7193625010036,
"count": 63578,
"self": 1.9662068870131861,
"children": {
"process_trajectory": {
"total": 134.00532010599363,
"count": 63578,
"self": 133.82340233599325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.181917770000382,
"count": 2,
"self": 0.181917770000382
}
}
},
"_update_policy": {
"total": 474.7478355079968,
"count": 459,
"self": 176.97164415803672,
"children": {
"TorchPPOOptimizer.update": {
"total": 297.7761913499601,
"count": 22785,
"self": 297.7761913499601
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.350001164420974e-07,
"count": 1,
"self": 9.350001164420974e-07
},
"TrainerController._save_models": {
"total": 0.08323894200020732,
"count": 1,
"self": 0.0018647290003173111,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08137421299989,
"count": 1,
"self": 0.08137421299989
}
}
}
}
}
}
}