{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36996200680732727,
"min": 0.36996200680732727,
"max": 1.424869179725647,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11075.1826171875,
"min": 11075.1826171875,
"max": 43224.83203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 29952.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 29952.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7005137801170349,
"min": -0.11336226016283035,
"max": 0.7096119523048401,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 200.34693908691406,
"min": -27.32030487060547,
"max": 206.4970703125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.020831672474741936,
"min": 0.014011269435286522,
"max": 0.466105192899704,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.957858085632324,
"min": 3.474794864654541,
"max": 110.46693420410156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06974971556984302,
"min": 0.065026826925099,
"max": 0.07606164600254454,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0462457335476454,
"min": 0.5245582093263912,
"max": 1.0519935794097062,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016856403055781913,
"min": 0.00044481398023328334,
"max": 0.017312602111598522,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2528460458367287,
"min": 0.0053377677627994,
"max": 0.2528460458367287,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.543817485426664e-06,
"min": 7.543817485426664e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011315726228139997,
"min": 0.00011315726228139997,
"max": 0.0031282722572427,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251457333333333,
"min": 0.10251457333333333,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377186,
"min": 1.3691136000000002,
"max": 2.4427573000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026120587599999995,
"min": 0.00026120587599999995,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00391808814,
"min": 0.00391808814,
"max": 0.10431145427000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01204988919198513,
"min": 0.012017972767353058,
"max": 0.5007407069206238,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1807483434677124,
"min": 0.1682516187429428,
"max": 3.5051848888397217,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 265.55357142857144,
"min": 249.36666666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29742.0,
"min": 15984.0,
"max": 34005.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.716587490535208,
"min": -1.0000000521540642,
"max": 1.7506554419753932,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 192.2577989399433,
"min": -32.000001668930054,
"max": 208.3279975950718,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.716587490535208,
"min": -1.0000000521540642,
"max": 1.7506554419753932,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 192.2577989399433,
"min": -32.000001668930054,
"max": 208.3279975950718,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.032973176951989966,
"min": 0.032973176951989966,
"max": 10.091851200908422,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.692995818622876,
"min": 3.6399353367451113,
"max": 161.46961921453476,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679678449",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679680637"
},
"total": 2187.632946973,
"count": 1,
"self": 0.4243814229994314,
"children": {
"run_training.setup": {
"total": 0.10585389299990311,
"count": 1,
"self": 0.10585389299990311
},
"TrainerController.start_learning": {
"total": 2187.1027116570003,
"count": 1,
"self": 1.3085482730270996,
"children": {
"TrainerController._reset_env": {
"total": 7.253433421999944,
"count": 1,
"self": 7.253433421999944
},
"TrainerController.advance": {
"total": 2178.4503444099732,
"count": 64120,
"self": 1.3721845960267274,
"children": {
"env_step": {
"total": 1554.3524011660118,
"count": 64120,
"self": 1448.4077842799998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.1294287910323,
"count": 64120,
"self": 4.603278246096352,
"children": {
"TorchPolicy.evaluate": {
"total": 100.52615054493594,
"count": 62573,
"self": 100.52615054493594
}
}
},
"workers": {
"total": 0.8151880949797032,
"count": 64120,
"self": 0.0,
"children": {
"worker_root": {
"total": 2182.4185157589477,
"count": 64120,
"is_parallel": true,
"self": 846.6449541998729,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017810470001222711,
"count": 1,
"is_parallel": true,
"self": 0.0005213949998505996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012596520002716716,
"count": 8,
"is_parallel": true,
"self": 0.0012596520002716716
}
}
},
"UnityEnvironment.step": {
"total": 0.04597098800013555,
"count": 1,
"is_parallel": true,
"self": 0.0005271830000310729,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004611179999756132,
"count": 1,
"is_parallel": true,
"self": 0.0004611179999756132
},
"communicator.exchange": {
"total": 0.04341114700014259,
"count": 1,
"is_parallel": true,
"self": 0.04341114700014259
},
"steps_from_proto": {
"total": 0.0015715399999862711,
"count": 1,
"is_parallel": true,
"self": 0.000356052000370255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012154879996160162,
"count": 8,
"is_parallel": true,
"self": 0.0012154879996160162
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1335.7735615590748,
"count": 64119,
"is_parallel": true,
"self": 31.098195949989076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.666739901009805,
"count": 64119,
"is_parallel": true,
"self": 22.666739901009805
},
"communicator.exchange": {
"total": 1190.6620188909833,
"count": 64119,
"is_parallel": true,
"self": 1190.6620188909833
},
"steps_from_proto": {
"total": 91.3466068170926,
"count": 64119,
"is_parallel": true,
"self": 19.292342446069824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.05426437102278,
"count": 512952,
"is_parallel": true,
"self": 72.05426437102278
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 622.7257586479345,
"count": 64120,
"self": 2.417894213892396,
"children": {
"process_trajectory": {
"total": 117.52689983303844,
"count": 64120,
"self": 117.27304271403864,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2538571189998038,
"count": 2,
"self": 0.2538571189998038
}
}
},
"_update_policy": {
"total": 502.7809646010037,
"count": 439,
"self": 320.5255441089341,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.25542049206956,
"count": 22809,
"self": 182.25542049206956
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.600000001024455e-07,
"count": 1,
"self": 8.600000001024455e-07
},
"TrainerController._save_models": {
"total": 0.09038469200004329,
"count": 1,
"self": 0.0013917280002715415,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08899296399977175,
"count": 1,
"self": 0.08899296399977175
}
}
}
}
}
}
}