{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5653989911079407,
"min": 0.5653989911079407,
"max": 1.4862802028656006,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 16808.181640625,
"min": 16808.181640625,
"max": 45087.796875,
"count": 10
},
"Pyramids.Step.mean": {
"value": 299996.0,
"min": 29952.0,
"max": 299996.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 299996.0,
"min": 29952.0,
"max": 299996.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02738116681575775,
"min": -0.09582620859146118,
"max": -0.016063055023550987,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.681004524230957,
"min": -23.117843627929688,
"max": -3.903322458267212,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05234517529606819,
"min": 0.05234517529606819,
"max": 0.30462321639060974,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 12.772222518920898,
"min": 12.772222518920898,
"max": 73.41419219970703,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06409473829038236,
"min": 0.06409473829038236,
"max": 0.07202698358475383,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8973263360653531,
"min": 0.4738851885539453,
"max": 0.9948474767126223,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004295561459316139,
"min": 0.0015652213258171269,
"max": 0.008007535235625012,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.060137860430425946,
"min": 0.01878265590980552,
"max": 0.060633127308228965,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5682523343952376e-05,
"min": 1.5682523343952376e-05,
"max": 0.0002838354339596191,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00021955532681533328,
"min": 0.00021955532681533328,
"max": 0.0027049592983469993,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1052274761904762,
"min": 0.1052274761904762,
"max": 0.19461180952380958,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4731846666666668,
"min": 1.362282666666667,
"max": 2.146786666666667,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005322248714285714,
"min": 0.0005322248714285714,
"max": 0.00946171977142857,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0074511481999999995,
"min": 0.0074511481999999995,
"max": 0.09019513470000001,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.04209328442811966,
"min": 0.04209328442811966,
"max": 0.40296855568885803,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.5893059968948364,
"min": 0.5893059968948364,
"max": 2.820779800415039,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 868.2941176470588,
"min": 868.2941176470588,
"max": 999.0,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29522.0,
"min": 15984.0,
"max": 33009.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.33589701535123767,
"min": -1.0000000521540642,
"max": -0.31159379379823804,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -11.084601506590843,
"min": -29.037401787936687,
"max": -9.971001401543617,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.33589701535123767,
"min": -1.0000000521540642,
"max": -0.31159379379823804,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -11.084601506590843,
"min": -29.037401787936687,
"max": -9.971001401543617,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.38493717608578276,
"min": 0.38493717608578276,
"max": 7.654727725312114,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 12.702926810830832,
"min": 12.702926810830832,
"max": 122.47564360499382,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1721995330",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=/content/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1721996328"
},
"total": 998.0885114589996,
"count": 1,
"self": 0.833706854999491,
"children": {
"run_training.setup": {
"total": 0.13097791800009873,
"count": 1,
"self": 0.13097791800009873
},
"TrainerController.start_learning": {
"total": 997.123826686,
"count": 1,
"self": 0.6798823610129148,
"children": {
"TrainerController._reset_env": {
"total": 4.04376729500018,
"count": 1,
"self": 4.04376729500018
},
"TrainerController.advance": {
"total": 992.2888014529872,
"count": 18940,
"self": 0.8367100980422038,
"children": {
"env_step": {
"total": 650.988207995918,
"count": 18940,
"self": 596.6075197898658,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.9182135140195,
"count": 18940,
"self": 2.437468883047586,
"children": {
"TorchPolicy.evaluate": {
"total": 51.48074463097191,
"count": 18811,
"self": 51.48074463097191
}
}
},
"workers": {
"total": 0.4624746920326288,
"count": 18940,
"self": 0.0,
"children": {
"worker_root": {
"total": 994.8375258430619,
"count": 18940,
"is_parallel": true,
"self": 458.0522480631048,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0050691960000222025,
"count": 1,
"is_parallel": true,
"self": 0.001457382000353391,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0036118139996688114,
"count": 8,
"is_parallel": true,
"self": 0.0036118139996688114
}
}
},
"UnityEnvironment.step": {
"total": 0.07316634000017075,
"count": 1,
"is_parallel": true,
"self": 0.0008945400004449766,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005821099998684076,
"count": 1,
"is_parallel": true,
"self": 0.0005821099998684076
},
"communicator.exchange": {
"total": 0.0691043339998032,
"count": 1,
"is_parallel": true,
"self": 0.0691043339998032
},
"steps_from_proto": {
"total": 0.002585356000054162,
"count": 1,
"is_parallel": true,
"self": 0.00044130499964012415,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002144051000414038,
"count": 8,
"is_parallel": true,
"self": 0.002144051000414038
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 536.7852777799571,
"count": 18939,
"is_parallel": true,
"self": 15.727382065957954,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.70050056498394,
"count": 18939,
"is_parallel": true,
"self": 9.70050056498394
},
"communicator.exchange": {
"total": 471.09708704601735,
"count": 18939,
"is_parallel": true,
"self": 471.09708704601735
},
"steps_from_proto": {
"total": 40.2603081029979,
"count": 18939,
"is_parallel": true,
"self": 8.80105579002111,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.459252312976787,
"count": 151512,
"is_parallel": true,
"self": 31.459252312976787
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 340.463883359027,
"count": 18940,
"self": 1.2769077820089478,
"children": {
"process_trajectory": {
"total": 54.41012913301893,
"count": 18940,
"self": 54.41012913301893
},
"_update_policy": {
"total": 284.77684644399915,
"count": 123,
"self": 112.66819105198692,
"children": {
"TorchPPOOptimizer.update": {
"total": 172.10865539201222,
"count": 6831,
"self": 172.10865539201222
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3639996723213699e-06,
"count": 1,
"self": 1.3639996723213699e-06
},
"TrainerController._save_models": {
"total": 0.1113742130000901,
"count": 1,
"self": 0.0041445390002081695,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10722967399988192,
"count": 1,
"self": 0.10722967399988192
}
}
}
}
}
}
}