{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1662611961364746,
"min": 1.1662611961364746,
"max": 2.8611679077148438,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11135.4619140625,
"min": 11135.4619140625,
"max": 29332.693359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.428703308105469,
"min": 0.35668379068374634,
"max": 12.428703308105469,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2423.59716796875,
"min": 69.1966552734375,
"max": 2513.271484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06026895807693054,
"min": 0.06026895807693054,
"max": 0.0740331245138118,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24107583230772217,
"min": 0.24107583230772217,
"max": 0.37016562256905905,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20805378952154926,
"min": 0.11242406598038918,
"max": 0.2862101056266065,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8322151580861971,
"min": 0.4496962639215567,
"max": 1.3548536274363012,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.727272727272727,
"min": 3.3181818181818183,
"max": 24.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1088.0,
"min": 146.0,
"max": 1345.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.727272727272727,
"min": 3.3181818181818183,
"max": 24.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1088.0,
"min": 146.0,
"max": 1345.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675344261",
"python_version": "3.9.2 (default, Feb 28 2021, 17:03:44) \n[GCC 10.2.1 20210110]",
"command_line_arguments": "/home/olav/dev/python/hug-rl/.venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.23.0",
"end_time_seconds": "1675344669"
},
"total": 407.81601024300016,
"count": 1,
"self": 0.3204813310003374,
"children": {
"run_training.setup": {
"total": 0.015708887999608123,
"count": 1,
"self": 0.015708887999608123
},
"TrainerController.start_learning": {
"total": 407.4798200240002,
"count": 1,
"self": 0.696593027056224,
"children": {
"TrainerController._reset_env": {
"total": 0.7398178429994005,
"count": 1,
"self": 0.7398178429994005
},
"TrainerController.advance": {
"total": 405.95579459194414,
"count": 18204,
"self": 0.37549311893417325,
"children": {
"env_step": {
"total": 405.58030147300997,
"count": 18204,
"self": 308.3815203889262,
"children": {
"SubprocessEnvManager._take_step": {
"total": 96.84380461899036,
"count": 18204,
"self": 1.6821978800080615,
"children": {
"TorchPolicy.evaluate": {
"total": 95.1616067389823,
"count": 18204,
"self": 13.197995755970624,
"children": {
"TorchPolicy.sample_actions": {
"total": 81.96361098301168,
"count": 18204,
"self": 81.96361098301168
}
}
}
}
},
"workers": {
"total": 0.35497646509338665,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 406.59362366494497,
"count": 18204,
"is_parallel": true,
"self": 178.434302886948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001477494999562623,
"count": 1,
"is_parallel": true,
"self": 0.0004141019981034333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010633930014591897,
"count": 10,
"is_parallel": true,
"self": 0.0010633930014591897
}
}
},
"UnityEnvironment.step": {
"total": 0.041202341999451164,
"count": 1,
"is_parallel": true,
"self": 0.0005665069993483485,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005424789997050539,
"count": 1,
"is_parallel": true,
"self": 0.0005424789997050539
},
"communicator.exchange": {
"total": 0.03817544600042311,
"count": 1,
"is_parallel": true,
"self": 0.03817544600042311
},
"steps_from_proto": {
"total": 0.001917909999974654,
"count": 1,
"is_parallel": true,
"self": 0.00047059600001375657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014473139999608975,
"count": 10,
"is_parallel": true,
"self": 0.0014473139999608975
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 228.15932077799698,
"count": 18203,
"is_parallel": true,
"self": 9.970444253203823,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.343786343943975,
"count": 18203,
"is_parallel": true,
"self": 8.343786343943975
},
"communicator.exchange": {
"total": 178.2304511028915,
"count": 18203,
"is_parallel": true,
"self": 178.2304511028915
},
"steps_from_proto": {
"total": 31.61463907795769,
"count": 18203,
"is_parallel": true,
"self": 7.632101192280061,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.98253788567763,
"count": 182030,
"is_parallel": true,
"self": 23.98253788567763
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001740890002110973,
"count": 1,
"self": 0.0001740890002110973,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 404.7388965471664,
"count": 202584,
"is_parallel": true,
"self": 4.42287842711994,
"children": {
"process_trajectory": {
"total": 228.55206522804656,
"count": 202584,
"is_parallel": true,
"self": 227.75059013504688,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8014750929996808,
"count": 4,
"is_parallel": true,
"self": 0.8014750929996808
}
}
},
"_update_policy": {
"total": 171.7639528919999,
"count": 90,
"is_parallel": true,
"self": 30.63024234099612,
"children": {
"TorchPPOOptimizer.update": {
"total": 141.1337105510038,
"count": 4587,
"is_parallel": true,
"self": 141.1337105510038
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08744047300024249,
"count": 1,
"self": 0.0008018159996936447,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08663865700054885,
"count": 1,
"self": 0.08663865700054885
}
}
}
}
}
}
}