{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2834779024124146,
"min": 1.2834779024124146,
"max": 2.8505074977874756,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13214.6884765625,
"min": 12960.5,
"max": 29097.98046875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.088072776794434,
"min": 0.42709881067276,
"max": 10.088072776794434,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2057.966796875,
"min": 82.85717010498047,
"max": 2057.966796875,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07123042440073855,
"min": 0.0647622538276254,
"max": 0.07166613244323443,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3561521220036928,
"min": 0.2710937809525229,
"max": 0.3561521220036928,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24531365977782832,
"min": 0.12783101504650332,
"max": 0.3004332551477002,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.2265682988891415,
"min": 0.5113240601860133,
"max": 1.4016347360961579,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.145454545454545,
"min": 3.75,
"max": 21.145454545454545,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1163.0,
"min": 165.0,
"max": 1163.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.145454545454545,
"min": 3.75,
"max": 21.145454545454545,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1163.0,
"min": 165.0,
"max": 1163.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1750947466",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1750947672"
},
"total": 205.64844776400003,
"count": 1,
"self": 0.4325169869999854,
"children": {
"run_training.setup": {
"total": 0.023165067000036288,
"count": 1,
"self": 0.023165067000036288
},
"TrainerController.start_learning": {
"total": 205.19276571,
"count": 1,
"self": 0.160835964996636,
"children": {
"TrainerController._reset_env": {
"total": 3.2537330589999556,
"count": 1,
"self": 3.2537330589999556
},
"TrainerController.advance": {
"total": 201.7000612060035,
"count": 9128,
"self": 0.17056222400071874,
"children": {
"env_step": {
"total": 144.13597583000592,
"count": 9128,
"self": 110.75488516798828,
"children": {
"SubprocessEnvManager._take_step": {
"total": 33.28531064300694,
"count": 9128,
"self": 0.6052321050062801,
"children": {
"TorchPolicy.evaluate": {
"total": 32.68007853800066,
"count": 9128,
"self": 32.68007853800066
}
}
},
"workers": {
"total": 0.09578001901070365,
"count": 9128,
"self": 0.0,
"children": {
"worker_root": {
"total": 204.50025811300452,
"count": 9128,
"is_parallel": true,
"self": 107.07476803500458,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005435879999936333,
"count": 1,
"is_parallel": true,
"self": 0.003966450999996596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001469428999939737,
"count": 10,
"is_parallel": true,
"self": 0.001469428999939737
}
}
},
"UnityEnvironment.step": {
"total": 0.03437481299999945,
"count": 1,
"is_parallel": true,
"self": 0.0005933190000178001,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003771359999973356,
"count": 1,
"is_parallel": true,
"self": 0.0003771359999973356
},
"communicator.exchange": {
"total": 0.03160597700002654,
"count": 1,
"is_parallel": true,
"self": 0.03160597700002654
},
"steps_from_proto": {
"total": 0.0017983809999577716,
"count": 1,
"is_parallel": true,
"self": 0.0003566359999922497,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014417449999655219,
"count": 10,
"is_parallel": true,
"self": 0.0014417449999655219
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 97.42549007799994,
"count": 9127,
"is_parallel": true,
"self": 4.796312393001699,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.593081057999939,
"count": 9127,
"is_parallel": true,
"self": 2.593081057999939
},
"communicator.exchange": {
"total": 74.94763310900044,
"count": 9127,
"is_parallel": true,
"self": 74.94763310900044
},
"steps_from_proto": {
"total": 15.088463517997866,
"count": 9127,
"is_parallel": true,
"self": 2.6505925999881583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.437870918009708,
"count": 91270,
"is_parallel": true,
"self": 12.437870918009708
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 57.393523151996874,
"count": 9128,
"self": 0.20506975299508667,
"children": {
"process_trajectory": {
"total": 12.501729880001903,
"count": 9128,
"self": 12.27152553400174,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23020434600016415,
"count": 2,
"self": 0.23020434600016415
}
}
},
"_update_policy": {
"total": 44.686723518999884,
"count": 45,
"self": 18.1674481289989,
"children": {
"TorchPPOOptimizer.update": {
"total": 26.519275390000985,
"count": 2292,
"self": 26.519275390000985
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.000999986899842e-06,
"count": 1,
"self": 1.000999986899842e-06
},
"TrainerController._save_models": {
"total": 0.07813447899991388,
"count": 1,
"self": 0.0007989619999761999,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07733551699993768,
"count": 1,
"self": 0.07733551699993768
}
}
}
}
}
}
}