{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8732863068580627,
"min": 0.8732863068580627,
"max": 2.8264968395233154,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8299.712890625,
"min": 8299.712890625,
"max": 28852.87890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.134537696838379,
"min": 0.31424158811569214,
"max": 13.134537696838379,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2561.23486328125,
"min": 60.962867736816406,
"max": 2662.52099609375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06781588897038268,
"min": 0.06380827034764817,
"max": 0.07478472443808819,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2712635558815307,
"min": 0.2568268247962217,
"max": 0.37392362219044095,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19677381400091976,
"min": 0.12213643408362188,
"max": 0.28770003767282354,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.787095256003679,
"min": 0.4885457363344875,
"max": 1.4059344696647982,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 3.4545454545454546,
"max": 25.863636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 152.0,
"max": 1413.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 3.4545454545454546,
"max": 25.863636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 152.0,
"max": 1413.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743157826",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743158247"
},
"total": 421.76003875,
"count": 1,
"self": 0.4324933980000196,
"children": {
"run_training.setup": {
"total": 0.023754426999971656,
"count": 1,
"self": 0.023754426999971656
},
"TrainerController.start_learning": {
"total": 421.303790925,
"count": 1,
"self": 0.3201291289876167,
"children": {
"TrainerController._reset_env": {
"total": 3.1735527999999817,
"count": 1,
"self": 3.1735527999999817
},
"TrainerController.advance": {
"total": 417.71367066601243,
"count": 18192,
"self": 0.349807837017579,
"children": {
"env_step": {
"total": 294.5685893009975,
"count": 18192,
"self": 224.631186097997,
"children": {
"SubprocessEnvManager._take_step": {
"total": 69.73530930499794,
"count": 18192,
"self": 1.1890539609921689,
"children": {
"TorchPolicy.evaluate": {
"total": 68.54625534400577,
"count": 18192,
"self": 68.54625534400577
}
}
},
"workers": {
"total": 0.20209389800254485,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 419.86567884201224,
"count": 18192,
"is_parallel": true,
"self": 222.53417735601533,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005654164999953082,
"count": 1,
"is_parallel": true,
"self": 0.004192835999901945,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001461329000051137,
"count": 10,
"is_parallel": true,
"self": 0.001461329000051137
}
}
},
"UnityEnvironment.step": {
"total": 0.034474326999998084,
"count": 1,
"is_parallel": true,
"self": 0.0005944389999967825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000393729999984771,
"count": 1,
"is_parallel": true,
"self": 0.000393729999984771
},
"communicator.exchange": {
"total": 0.031695694000006824,
"count": 1,
"is_parallel": true,
"self": 0.031695694000006824
},
"steps_from_proto": {
"total": 0.001790464000009706,
"count": 1,
"is_parallel": true,
"self": 0.00035661600003322746,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014338479999764786,
"count": 10,
"is_parallel": true,
"self": 0.0014338479999764786
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 197.3315014859969,
"count": 18191,
"is_parallel": true,
"self": 9.668070109990083,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.323712397996076,
"count": 18191,
"is_parallel": true,
"self": 5.323712397996076
},
"communicator.exchange": {
"total": 151.65347826500397,
"count": 18191,
"is_parallel": true,
"self": 151.65347826500397
},
"steps_from_proto": {
"total": 30.686240713006782,
"count": 18191,
"is_parallel": true,
"self": 5.444078202010019,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.242162510996764,
"count": 181910,
"is_parallel": true,
"self": 25.242162510996764
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 122.79527352799738,
"count": 18192,
"self": 0.4157387339941465,
"children": {
"process_trajectory": {
"total": 26.68709785200366,
"count": 18192,
"self": 26.229253071003768,
"children": {
"RLTrainer._checkpoint": {
"total": 0.457844780999892,
"count": 4,
"self": 0.457844780999892
}
}
},
"_update_policy": {
"total": 95.69243694199957,
"count": 90,
"self": 38.38458081200059,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.30785612999898,
"count": 4587,
"self": 57.30785612999898
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0070000371342758e-06,
"count": 1,
"self": 1.0070000371342758e-06
},
"TrainerController._save_models": {
"total": 0.09643732299991825,
"count": 1,
"self": 0.0009461779999355713,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09549114499998268,
"count": 1,
"self": 0.09549114499998268
}
}
}
}
}
}
}