{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6971714496612549,
"min": 1.6971714496612549,
"max": 2.8765087127685547,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8662.36328125,
"min": 8032.01806640625,
"max": 14681.7001953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 4976.0,
"max": 99960.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 4976.0,
"max": 99960.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.076581954956055,
"min": 0.2706344723701477,
"max": 9.076581954956055,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 880.428466796875,
"min": 26.251543045043945,
"max": 945.4606323242188,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07450252501567935,
"min": 0.06195335697916829,
"max": 0.08091010043041452,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.1490050500313587,
"min": 0.12390671395833658,
"max": 0.22157836427875593,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2782524317210796,
"min": 0.10734115374347597,
"max": 0.28654323868891773,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5565048634421592,
"min": 0.21468230748695194,
"max": 0.8596297160667532,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.564097812000004e-06,
"min": 6.564097812000004e-06,
"max": 0.000290364003212,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.3128195624000008e-05,
"min": 1.3128195624000008e-05,
"max": 0.000742392052536,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10218800000000001,
"min": 0.10218800000000001,
"max": 0.19678800000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20437600000000003,
"min": 0.20437600000000003,
"max": 0.547464,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011918120000000008,
"min": 0.00011918120000000008,
"max": 0.0048397212,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023836240000000016,
"min": 0.00023836240000000016,
"max": 0.012378453600000002,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 4378.0,
"min": 4378.0,
"max": 6567.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 18.454545454545453,
"min": 3.0454545454545454,
"max": 18.939393939393938,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 406.0,
"min": 67.0,
"max": 625.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 18.454545454545453,
"min": 3.0454545454545454,
"max": 18.939393939393938,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 406.0,
"min": 67.0,
"max": 625.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743800784",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743800998"
},
"total": 213.86556662099997,
"count": 1,
"self": 0.43441494299997885,
"children": {
"run_training.setup": {
"total": 0.023158139999964078,
"count": 1,
"self": 0.023158139999964078
},
"TrainerController.start_learning": {
"total": 213.40799353800003,
"count": 1,
"self": 0.16598357700024735,
"children": {
"TrainerController._reset_env": {
"total": 3.1383354299999837,
"count": 1,
"self": 3.1383354299999837
},
"TrainerController.advance": {
"total": 210.00449963699987,
"count": 9128,
"self": 0.17580042600320667,
"children": {
"env_step": {
"total": 147.52412846399977,
"count": 9128,
"self": 112.40128614100172,
"children": {
"SubprocessEnvManager._take_step": {
"total": 35.01949567699762,
"count": 9128,
"self": 0.6232856429954836,
"children": {
"TorchPolicy.evaluate": {
"total": 34.39621003400214,
"count": 9128,
"self": 34.39621003400214
}
}
},
"workers": {
"total": 0.10334664600043197,
"count": 9128,
"self": 0.0,
"children": {
"worker_root": {
"total": 212.55646003499385,
"count": 9128,
"is_parallel": true,
"self": 113.71880058499983,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005899667999983649,
"count": 1,
"is_parallel": true,
"self": 0.004318211999930099,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015814560000535494,
"count": 10,
"is_parallel": true,
"self": 0.0015814560000535494
}
}
},
"UnityEnvironment.step": {
"total": 0.07697206299997106,
"count": 1,
"is_parallel": true,
"self": 0.0006599989998221645,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004249280000294675,
"count": 1,
"is_parallel": true,
"self": 0.0004249280000294675
},
"communicator.exchange": {
"total": 0.07352100900004643,
"count": 1,
"is_parallel": true,
"self": 0.07352100900004643
},
"steps_from_proto": {
"total": 0.002366127000072993,
"count": 1,
"is_parallel": true,
"self": 0.0004984030001651263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018677239999078665,
"count": 10,
"is_parallel": true,
"self": 0.0018677239999078665
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 98.83765944999402,
"count": 9127,
"is_parallel": true,
"self": 4.904732025995372,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.6592514169974493,
"count": 9127,
"is_parallel": true,
"self": 2.6592514169974493
},
"communicator.exchange": {
"total": 75.94457990100784,
"count": 9127,
"is_parallel": true,
"self": 75.94457990100784
},
"steps_from_proto": {
"total": 15.329096105993358,
"count": 9127,
"is_parallel": true,
"self": 2.7501741810026488,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.57892192499071,
"count": 91270,
"is_parallel": true,
"self": 12.57892192499071
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 62.30457074699689,
"count": 9128,
"self": 0.22340124099878267,
"children": {
"process_trajectory": {
"total": 13.532900494998557,
"count": 9128,
"self": 13.104127864998532,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42877263000002586,
"count": 4,
"self": 0.42877263000002586
}
}
},
"_update_policy": {
"total": 48.54826901099955,
"count": 45,
"self": 19.653073322002115,
"children": {
"TorchPPOOptimizer.update": {
"total": 28.895195688997433,
"count": 2292,
"self": 28.895195688997433
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0930000371445203e-06,
"count": 1,
"self": 1.0930000371445203e-06
},
"TrainerController._save_models": {
"total": 0.09917380099989259,
"count": 1,
"self": 0.0009515669999018428,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09822223399999075,
"count": 1,
"self": 0.09822223399999075
}
}
}
}
}
}
}