{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9314367771148682,
"min": 0.9271849393844604,
"max": 2.841240406036377,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8852.375,
"min": 8852.375,
"max": 29003.3828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.087446212768555,
"min": 0.1095513105392456,
"max": 12.087446212768555,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2357.052001953125,
"min": 21.252954483032227,
"max": 2435.46630859375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07035013084846557,
"min": 0.061738841835119504,
"max": 0.07641182797709846,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2814005233938623,
"min": 0.24695536734047802,
"max": 0.3820591398854923,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18036154277768787,
"min": 0.12519046212158474,
"max": 0.2637467925455056,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7214461711107515,
"min": 0.500761848486339,
"max": 1.318733962727528,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.954545454545453,
"min": 3.5,
"max": 24.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1054.0,
"min": 154.0,
"max": 1320.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.954545454545453,
"min": 3.5,
"max": 24.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1054.0,
"min": 154.0,
"max": 1320.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1771884561",
"python_version": "3.10.10 (main, Mar 21 2023, 18:45:11) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1771885065"
},
"total": 504.84594874999993,
"count": 1,
"self": 0.43582329000037134,
"children": {
"run_training.setup": {
"total": 0.023479136999867478,
"count": 1,
"self": 0.023479136999867478
},
"TrainerController.start_learning": {
"total": 504.3866463229997,
"count": 1,
"self": 0.4838655879957514,
"children": {
"TrainerController._reset_env": {
"total": 3.1370153340003526,
"count": 1,
"self": 3.1370153340003526
},
"TrainerController.advance": {
"total": 500.67596756300327,
"count": 18192,
"self": 0.4585581210512828,
"children": {
"env_step": {
"total": 368.2551024589334,
"count": 18192,
"self": 287.27557590698234,
"children": {
"SubprocessEnvManager._take_step": {
"total": 80.68205171298678,
"count": 18192,
"self": 1.4403083239981243,
"children": {
"TorchPolicy.evaluate": {
"total": 79.24174338898865,
"count": 18192,
"self": 79.24174338898865
}
}
},
"workers": {
"total": 0.29747483896426274,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 502.5532606740062,
"count": 18192,
"is_parallel": true,
"self": 250.88787110601015,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00477929699991364,
"count": 1,
"is_parallel": true,
"self": 0.003368804999809072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014104920001045684,
"count": 10,
"is_parallel": true,
"self": 0.0014104920001045684
}
}
},
"UnityEnvironment.step": {
"total": 0.03877793700030452,
"count": 1,
"is_parallel": true,
"self": 0.0005658140007653856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043740399996750057,
"count": 1,
"is_parallel": true,
"self": 0.00043740399996750057
},
"communicator.exchange": {
"total": 0.035765949999586155,
"count": 1,
"is_parallel": true,
"self": 0.035765949999586155
},
"steps_from_proto": {
"total": 0.0020087689999854774,
"count": 1,
"is_parallel": true,
"self": 0.00039580899920110824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016129600007843692,
"count": 10,
"is_parallel": true,
"self": 0.0016129600007843692
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 251.66538956799604,
"count": 18191,
"is_parallel": true,
"self": 11.288755124004638,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.197572445010792,
"count": 18191,
"is_parallel": true,
"self": 6.197572445010792
},
"communicator.exchange": {
"total": 192.26959556296606,
"count": 18191,
"is_parallel": true,
"self": 192.26959556296606
},
"steps_from_proto": {
"total": 41.909466436014554,
"count": 18191,
"is_parallel": true,
"self": 7.363012939018972,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.54645349699558,
"count": 181910,
"is_parallel": true,
"self": 34.54645349699558
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 131.9623069830186,
"count": 18192,
"self": 0.5892544280422953,
"children": {
"process_trajectory": {
"total": 30.657050600981165,
"count": 18192,
"self": 30.166340573981415,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49071002699974997,
"count": 4,
"self": 0.49071002699974997
}
}
},
"_update_policy": {
"total": 100.71600195399515,
"count": 90,
"self": 41.368342776012014,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.34765917798313,
"count": 4587,
"self": 59.34765917798313
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.780001164472196e-07,
"count": 1,
"self": 9.780001164472196e-07
},
"TrainerController._save_models": {
"total": 0.08979686000020592,
"count": 1,
"self": 0.0008171040003617236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0889797559998442,
"count": 1,
"self": 0.0889797559998442
}
}
}
}
}
}
}