{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9923771619796753,
"min": 0.9923771619796753,
"max": 2.8786134719848633,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9780.869140625,
"min": 9780.869140625,
"max": 29384.88671875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9984.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9984.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.704163551330566,
"min": 0.1418629139661789,
"max": 12.704163551330566,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 4395.640625,
"min": 48.94270706176758,
"max": 4427.662109375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04467292573735904,
"min": 0.039076253460279986,
"max": 0.06054593183216639,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.17869170294943615,
"min": 0.15630501384111994,
"max": 0.26997842892888,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16892886254936457,
"min": 0.06613010034197941,
"max": 0.21004654802381992,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6757154501974583,
"min": 0.26452040136791766,
"max": 1.0502327401190996,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.347009730600001e-05,
"min": 1.347009730600001e-05,
"max": 0.00048647000270600005,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.388038922400004e-05,
"min": 5.388038922400004e-05,
"max": 0.00230860003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101347,
"min": 0.101347,
"max": 0.148647,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.405388,
"min": 0.405388,
"max": 0.73086,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.361060000000004e-05,
"min": 6.361060000000004e-05,
"max": 0.0019461506,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00025444240000000015,
"min": 0.00025444240000000015,
"max": 0.009238228000000001,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 3.159090909090909,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 139.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 3.159090909090909,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 139.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751650950",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751651332"
},
"total": 381.4976623130001,
"count": 1,
"self": 0.43852351800023825,
"children": {
"run_training.setup": {
"total": 0.023029425000004267,
"count": 1,
"self": 0.023029425000004267
},
"TrainerController.start_learning": {
"total": 381.03610936999985,
"count": 1,
"self": 0.34940780001579697,
"children": {
"TrainerController._reset_env": {
"total": 3.1115399860000252,
"count": 1,
"self": 3.1115399860000252
},
"TrainerController.advance": {
"total": 377.486965160984,
"count": 18192,
"self": 0.38934625599881656,
"children": {
"env_step": {
"total": 298.3649773470072,
"count": 18192,
"self": 230.44357231299205,
"children": {
"SubprocessEnvManager._take_step": {
"total": 67.71243508802218,
"count": 18192,
"self": 1.2311744800231281,
"children": {
"TorchPolicy.evaluate": {
"total": 66.48126060799905,
"count": 18192,
"self": 66.48126060799905
}
}
},
"workers": {
"total": 0.20896994599297614,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 379.62575990800724,
"count": 18192,
"is_parallel": true,
"self": 177.38157260200478,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005124139999907129,
"count": 1,
"is_parallel": true,
"self": 0.0035729460003040003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015511939996031288,
"count": 10,
"is_parallel": true,
"self": 0.0015511939996031288
}
}
},
"UnityEnvironment.step": {
"total": 0.03471657199997935,
"count": 1,
"is_parallel": true,
"self": 0.0005871600001228217,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041499799999655806,
"count": 1,
"is_parallel": true,
"self": 0.00041499799999655806
},
"communicator.exchange": {
"total": 0.03186053599984007,
"count": 1,
"is_parallel": true,
"self": 0.03186053599984007
},
"steps_from_proto": {
"total": 0.001853878000019904,
"count": 1,
"is_parallel": true,
"self": 0.00036100499914937245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014928730008705315,
"count": 10,
"is_parallel": true,
"self": 0.0014928730008705315
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 202.24418730600246,
"count": 18191,
"is_parallel": true,
"self": 9.872606642018582,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.339244836973421,
"count": 18191,
"is_parallel": true,
"self": 5.339244836973421
},
"communicator.exchange": {
"total": 155.33375647001048,
"count": 18191,
"is_parallel": true,
"self": 155.33375647001048
},
"steps_from_proto": {
"total": 31.69857935699997,
"count": 18191,
"is_parallel": true,
"self": 5.712093120998588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.986486236001383,
"count": 181910,
"is_parallel": true,
"self": 25.986486236001383
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 78.73264155797801,
"count": 18192,
"self": 0.44047083495820516,
"children": {
"process_trajectory": {
"total": 34.03118784001913,
"count": 18192,
"self": 33.61856329201942,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4126245479997124,
"count": 4,
"self": 0.4126245479997124
}
}
},
"_update_policy": {
"total": 44.26098288300068,
"count": 90,
"self": 25.78638502399872,
"children": {
"TorchPPOOptimizer.update": {
"total": 18.47459785900196,
"count": 1440,
"self": 18.47459785900196
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1090000953117851e-06,
"count": 1,
"self": 1.1090000953117851e-06
},
"TrainerController._save_models": {
"total": 0.0881953139999041,
"count": 1,
"self": 0.0008131259999117901,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08738218799999231,
"count": 1,
"self": 0.08738218799999231
}
}
}
}
}
}
}