{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3366871774196625,
"min": 0.3330022096633911,
"max": 1.4219340085983276,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10235.2900390625,
"min": 9984.73828125,
"max": 43135.7890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989877.0,
"min": 29952.0,
"max": 989877.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989877.0,
"min": 29952.0,
"max": 989877.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6816462874412537,
"min": -0.07592428475618362,
"max": 0.68465656042099,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 196.3141326904297,
"min": -18.297752380371094,
"max": 201.97369384765625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03980015218257904,
"min": 0.00152592605445534,
"max": 0.48404577374458313,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.462443351745605,
"min": 0.41810375452041626,
"max": 114.7188491821289,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06694541648806383,
"min": 0.06646687135179279,
"max": 0.07333578196485536,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0041812473209575,
"min": 0.5133504737539876,
"max": 1.0666657999487748,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01642445712414984,
"min": 0.0006315113308721555,
"max": 0.017227739065121143,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2463668568622476,
"min": 0.008209647301338021,
"max": 0.2463668568622476,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.515037495019998e-06,
"min": 7.515037495019998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011272556242529997,
"min": 0.00011272556242529997,
"max": 0.0035088470303843993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250498000000001,
"min": 0.10250498000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375747000000002,
"min": 1.3886848,
"max": 2.5696156,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026024750199999996,
"min": 0.00026024750199999996,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039037125299999998,
"min": 0.0039037125299999998,
"max": 0.11698459844000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010924839414656162,
"min": 0.010924839414656162,
"max": 0.6330909729003906,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1638725847005844,
"min": 0.15804336965084076,
"max": 4.431636810302734,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 292.1588785046729,
"min": 254.34234234234233,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31261.0,
"min": 15984.0,
"max": 32686.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6517513869939564,
"min": -1.0000000521540642,
"max": 1.7279071247737323,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 176.73739840835333,
"min": -30.71720176190138,
"max": 193.525597974658,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6517513869939564,
"min": -1.0000000521540642,
"max": 1.7279071247737323,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 176.73739840835333,
"min": -30.71720176190138,
"max": 193.525597974658,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03261350840656522,
"min": 0.030612224453453303,
"max": 13.677852573804557,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4896453995024785,
"min": 3.42856913878677,
"max": 218.84564118087292,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705061374",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705063949"
},
"total": 2574.881598223,
"count": 1,
"self": 0.5274620090003737,
"children": {
"run_training.setup": {
"total": 0.054025351999371196,
"count": 1,
"self": 0.054025351999371196
},
"TrainerController.start_learning": {
"total": 2574.300110862,
"count": 1,
"self": 1.6097330481125027,
"children": {
"TrainerController._reset_env": {
"total": 2.6791225780007153,
"count": 1,
"self": 2.6791225780007153
},
"TrainerController.advance": {
"total": 2569.925552949887,
"count": 64187,
"self": 1.665976816926559,
"children": {
"env_step": {
"total": 1886.1826631479453,
"count": 64187,
"self": 1742.468621815141,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.6963787568602,
"count": 64187,
"self": 5.22195460990406,
"children": {
"TorchPolicy.evaluate": {
"total": 137.47442414695615,
"count": 62565,
"self": 137.47442414695615
}
}
},
"workers": {
"total": 1.0176625759440867,
"count": 64187,
"self": 0.0,
"children": {
"worker_root": {
"total": 2568.3049121659824,
"count": 64187,
"is_parallel": true,
"self": 957.5565839480278,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017127769997387077,
"count": 1,
"is_parallel": true,
"self": 0.0005435469984149677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00116923000132374,
"count": 8,
"is_parallel": true,
"self": 0.00116923000132374
}
}
},
"UnityEnvironment.step": {
"total": 0.07309752099990874,
"count": 1,
"is_parallel": true,
"self": 0.0005886429999009124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042253199990227586,
"count": 1,
"is_parallel": true,
"self": 0.00042253199990227586
},
"communicator.exchange": {
"total": 0.07046525700025086,
"count": 1,
"is_parallel": true,
"self": 0.07046525700025086
},
"steps_from_proto": {
"total": 0.0016210889998546918,
"count": 1,
"is_parallel": true,
"self": 0.000329165000039211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012919239998154808,
"count": 8,
"is_parallel": true,
"self": 0.0012919239998154808
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1610.7483282179546,
"count": 64186,
"is_parallel": true,
"self": 38.61767408814649,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.485541429919977,
"count": 64186,
"is_parallel": true,
"self": 25.485541429919977
},
"communicator.exchange": {
"total": 1438.957215160849,
"count": 64186,
"is_parallel": true,
"self": 1438.957215160849
},
"steps_from_proto": {
"total": 107.68789753903911,
"count": 64186,
"is_parallel": true,
"self": 22.574713788824738,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.11318375021438,
"count": 513488,
"is_parallel": true,
"self": 85.11318375021438
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 682.076912985015,
"count": 64187,
"self": 3.208744297985504,
"children": {
"process_trajectory": {
"total": 136.31882239402785,
"count": 64187,
"self": 136.11881576502765,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2000066290001996,
"count": 2,
"self": 0.2000066290001996
}
}
},
"_update_policy": {
"total": 542.5493462930017,
"count": 453,
"self": 322.5379275799869,
"children": {
"TorchPPOOptimizer.update": {
"total": 220.0114187130148,
"count": 22809,
"self": 220.0114187130148
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.109999155043624e-07,
"count": 1,
"self": 9.109999155043624e-07
},
"TrainerController._save_models": {
"total": 0.08570137500009878,
"count": 1,
"self": 0.0013972209999337792,
"children": {
"RLTrainer._checkpoint": {
"total": 0.084304154000165,
"count": 1,
"self": 0.084304154000165
}
}
}
}
}
}
}