{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48961931467056274,
"min": 0.48961931467056274,
"max": 1.4358364343643188,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14563.2373046875,
"min": 14563.2373046875,
"max": 43557.53515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5870927572250366,
"min": -0.2063315063714981,
"max": 0.5870927572250366,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.32144165039062,
"min": -48.90056610107422,
"max": 167.32144165039062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008662767708301544,
"min": -0.014964998699724674,
"max": 0.3496842384338379,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.468888759613037,
"min": -4.010619640350342,
"max": 82.87516784667969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06889947521752524,
"min": 0.0641984300288771,
"max": 0.07278443145387598,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9645926530453532,
"min": 0.501175922426949,
"max": 1.0622457314651303,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015839299129202986,
"min": 0.00010409314958800455,
"max": 0.016233065845638824,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22175018780884181,
"min": 0.0014573040942320638,
"max": 0.2434959876845824,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.294833282707137e-06,
"min": 7.294833282707137e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010212766595789992,
"min": 0.00010212766595789992,
"max": 0.0033835286721572,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243157857142858,
"min": 0.10243157857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340421,
"min": 1.3886848,
"max": 2.5278428,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025291469928571416,
"min": 0.00025291469928571416,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003540805789999998,
"min": 0.003540805789999998,
"max": 0.11281149572,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008177466690540314,
"min": 0.007845316082239151,
"max": 0.3454439043998718,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11448453366756439,
"min": 0.11314088106155396,
"max": 2.418107271194458,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 327.3478260869565,
"min": 327.3478260869565,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30116.0,
"min": 15984.0,
"max": 32670.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6348461366289264,
"min": -1.0000000521540642,
"max": 1.6348461366289264,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 148.7709984332323,
"min": -30.702001735568047,
"max": 148.7709984332323,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6348461366289264,
"min": -1.0000000521540642,
"max": 1.6348461366289264,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 148.7709984332323,
"min": -30.702001735568047,
"max": 148.7709984332323,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027118850615646967,
"min": 0.027118850615646967,
"max": 6.860795432701707,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.467815406023874,
"min": 2.451593256846536,
"max": 109.77272692322731,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697621043",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697623284"
},
"total": 2241.2717820939997,
"count": 1,
"self": 0.49286338799993246,
"children": {
"run_training.setup": {
"total": 0.044765146000031564,
"count": 1,
"self": 0.044765146000031564
},
"TrainerController.start_learning": {
"total": 2240.73415356,
"count": 1,
"self": 1.3346242050256478,
"children": {
"TrainerController._reset_env": {
"total": 7.816889100000026,
"count": 1,
"self": 7.816889100000026
},
"TrainerController.advance": {
"total": 2231.503753694974,
"count": 63662,
"self": 1.4065277810577754,
"children": {
"env_step": {
"total": 1589.14357397198,
"count": 63662,
"self": 1461.3013536309215,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.02462406001672,
"count": 63662,
"self": 4.781223866998346,
"children": {
"TorchPolicy.evaluate": {
"total": 122.24340019301837,
"count": 62550,
"self": 122.24340019301837
}
}
},
"workers": {
"total": 0.8175962810418014,
"count": 63662,
"self": 0.0,
"children": {
"worker_root": {
"total": 2236.180835088982,
"count": 63662,
"is_parallel": true,
"self": 892.7924007289744,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0056347060000234706,
"count": 1,
"is_parallel": true,
"self": 0.003654760000188162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019799459998353086,
"count": 8,
"is_parallel": true,
"self": 0.0019799459998353086
}
}
},
"UnityEnvironment.step": {
"total": 0.04897819400002845,
"count": 1,
"is_parallel": true,
"self": 0.000599248000071384,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005139749999898413,
"count": 1,
"is_parallel": true,
"self": 0.0005139749999898413
},
"communicator.exchange": {
"total": 0.04623507599995946,
"count": 1,
"is_parallel": true,
"self": 0.04623507599995946
},
"steps_from_proto": {
"total": 0.0016298950000077639,
"count": 1,
"is_parallel": true,
"self": 0.0003750179999997272,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012548770000080367,
"count": 8,
"is_parallel": true,
"self": 0.0012548770000080367
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1343.3884343600075,
"count": 63661,
"is_parallel": true,
"self": 35.61511579707894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.745647940959145,
"count": 63661,
"is_parallel": true,
"self": 24.745647940959145
},
"communicator.exchange": {
"total": 1182.6201432669945,
"count": 63661,
"is_parallel": true,
"self": 1182.6201432669945
},
"steps_from_proto": {
"total": 100.40752735497495,
"count": 63661,
"is_parallel": true,
"self": 19.987222054121787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.42030530085316,
"count": 509288,
"is_parallel": true,
"self": 80.42030530085316
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 640.9536519419362,
"count": 63662,
"self": 2.48728760294739,
"children": {
"process_trajectory": {
"total": 122.06907820598923,
"count": 63662,
"self": 121.79706501498919,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27201319100004184,
"count": 2,
"self": 0.27201319100004184
}
}
},
"_update_policy": {
"total": 516.3972861329996,
"count": 446,
"self": 309.7660623019841,
"children": {
"TorchPPOOptimizer.update": {
"total": 206.63122383101552,
"count": 22857,
"self": 206.63122383101552
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1239999366807751e-06,
"count": 1,
"self": 1.1239999366807751e-06
},
"TrainerController._save_models": {
"total": 0.07888543600029152,
"count": 1,
"self": 0.0015133340002648765,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07737210200002664,
"count": 1,
"self": 0.07737210200002664
}
}
}
}
}
}
}