{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3013733923435211,
"min": 0.3013733923435211,
"max": 1.3869065046310425,
"count": 51
},
"Pyramids.Policy.Entropy.sum": {
"value": 8944.7626953125,
"min": 8944.7626953125,
"max": 42073.1953125,
"count": 51
},
"Pyramids.Step.mean": {
"value": 1529874.0,
"min": 29952.0,
"max": 1529874.0,
"count": 51
},
"Pyramids.Step.sum": {
"value": 1529874.0,
"min": 29952.0,
"max": 1529874.0,
"count": 51
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6376602053642273,
"min": -0.10865096747875214,
"max": 0.8013395071029663,
"count": 51
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 179.82017517089844,
"min": -26.07623291015625,
"max": 226.77908325195312,
"count": 51
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0068503571674227715,
"min": -0.023107096552848816,
"max": 0.5127955675125122,
"count": 51
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.9318007230758667,
"min": -6.192701816558838,
"max": 121.53254699707031,
"count": 51
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06443422263677635,
"min": 0.06260647159970609,
"max": 0.07329989886139611,
"count": 51
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9020791169148689,
"min": 0.5068109285935423,
"max": 1.0994984829209415,
"count": 51
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015139336185770323,
"min": 0.00015302859993752047,
"max": 0.016004248205544653,
"count": 51
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21195070660078452,
"min": 0.0021424003991252867,
"max": 0.22433037275842774,
"count": 51
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00014858180761560474,
"min": 0.00014858180761560474,
"max": 0.00029838354339596195,
"count": 51
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0020801453066184666,
"min": 0.0020801453066184666,
"max": 0.004011339262886934,
"count": 51
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14952725238095238,
"min": 0.14952725238095238,
"max": 0.19946118095238097,
"count": 51
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.0933815333333334,
"min": 1.3962282666666668,
"max": 2.737113066666667,
"count": 51
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004957772512857143,
"min": 0.004957772512857143,
"max": 0.009946171977142856,
"count": 51
},
"Pyramids.Policy.Beta.sum": {
"value": 0.06940881518,
"min": 0.06940881518,
"max": 0.13371759536,
"count": 51
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009086927399039268,
"min": 0.008219473995268345,
"max": 0.6906247138977051,
"count": 51
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12721697986125946,
"min": 0.11507263034582138,
"max": 4.8343729972839355,
"count": 51
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 296.90291262135923,
"min": 256.1304347826087,
"max": 999.0,
"count": 51
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30581.0,
"min": 15984.0,
"max": 33657.0,
"count": 51
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6642504691788294,
"min": -1.0000000521540642,
"max": 1.743869549813478,
"count": 51
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 171.41779832541943,
"min": -30.478001676499844,
"max": 200.54499822854996,
"count": 51
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6642504691788294,
"min": -1.0000000521540642,
"max": 1.743869549813478,
"count": 51
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 171.41779832541943,
"min": -30.478001676499844,
"max": 200.54499822854996,
"count": 51
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028028628551536827,
"min": 0.025254321085201516,
"max": 13.771021699532866,
"count": 51
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8869487408082932,
"min": 2.50017778743495,
"max": 220.33634719252586,
"count": 51
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 51
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 51
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710938354",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710941734"
},
"total": 3379.7989960259997,
"count": 1,
"self": 0.8631011649995344,
"children": {
"run_training.setup": {
"total": 0.07804342800000086,
"count": 1,
"self": 0.07804342800000086
},
"TrainerController.start_learning": {
"total": 3378.857851433,
"count": 1,
"self": 2.056766178111502,
"children": {
"TrainerController._reset_env": {
"total": 2.413214691000121,
"count": 1,
"self": 2.413214691000121
},
"TrainerController.advance": {
"total": 3374.2018100228884,
"count": 98993,
"self": 2.096173182968869,
"children": {
"env_step": {
"total": 2411.2447738710625,
"count": 98993,
"self": 2216.142988056031,
"children": {
"SubprocessEnvManager._take_step": {
"total": 193.85823684998104,
"count": 98993,
"self": 6.975426753015199,
"children": {
"TorchPolicy.evaluate": {
"total": 186.88281009696584,
"count": 96693,
"self": 186.88281009696584
}
}
},
"workers": {
"total": 1.2435489650506497,
"count": 98992,
"self": 0.0,
"children": {
"worker_root": {
"total": 3371.215016163822,
"count": 98992,
"is_parallel": true,
"self": 1335.4780909909382,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00206476500034114,
"count": 1,
"is_parallel": true,
"self": 0.000567830000363756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001496934999977384,
"count": 8,
"is_parallel": true,
"self": 0.001496934999977384
}
}
},
"UnityEnvironment.step": {
"total": 0.058058949999576726,
"count": 1,
"is_parallel": true,
"self": 0.006277705999764294,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005153219999556313,
"count": 1,
"is_parallel": true,
"self": 0.0005153219999556313
},
"communicator.exchange": {
"total": 0.0457263220000641,
"count": 1,
"is_parallel": true,
"self": 0.0457263220000641
},
"steps_from_proto": {
"total": 0.005539599999792699,
"count": 1,
"is_parallel": true,
"self": 0.004155742999955692,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013838569998370076,
"count": 8,
"is_parallel": true,
"self": 0.0013838569998370076
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2035.7369251728837,
"count": 98991,
"is_parallel": true,
"self": 52.44228848695866,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 35.41980783892086,
"count": 98991,
"is_parallel": true,
"self": 35.41980783892086
},
"communicator.exchange": {
"total": 1797.474503363885,
"count": 98991,
"is_parallel": true,
"self": 1797.474503363885
},
"steps_from_proto": {
"total": 150.4003254831191,
"count": 98991,
"is_parallel": true,
"self": 30.369625673021346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 120.03069981009776,
"count": 791928,
"is_parallel": true,
"self": 120.03069981009776
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 960.860862968857,
"count": 98992,
"self": 4.231509163881583,
"children": {
"process_trajectory": {
"total": 196.40715311397207,
"count": 98992,
"self": 196.08417555897267,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3229775549993974,
"count": 3,
"self": 0.3229775549993974
}
}
},
"_update_policy": {
"total": 760.2222006910033,
"count": 710,
"self": 442.7136882519826,
"children": {
"TorchPPOOptimizer.update": {
"total": 317.5085124390207,
"count": 35217,
"self": 317.5085124390207
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2620002962648869e-06,
"count": 1,
"self": 1.2620002962648869e-06
},
"TrainerController._save_models": {
"total": 0.18605927899989183,
"count": 1,
"self": 0.0021195889994487516,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18393969000044308,
"count": 1,
"self": 0.18393969000044308
}
}
}
}
}
}
}