{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.745213747024536,
"min": 2.744229316711426,
"max": 2.8903398513793945,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4106.83984375,
"min": 1932.4278564453125,
"max": 4285.9609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 960.0,
"max": 19992.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 960.0,
"max": 19992.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.6610846519470215,
"min": -0.13039711117744446,
"max": 1.6610846519470215,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 43.188201904296875,
"min": -2.0863537788391113,
"max": 43.188201904296875,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06050091581510417,
"min": 0.06050091581510417,
"max": 0.0862072527947306,
"count": 9
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06050091581510417,
"min": 0.06050091581510417,
"max": 0.0862072527947306,
"count": 9
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21537658046273625,
"min": 0.07611152607326706,
"max": 0.21537658046273625,
"count": 9
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.21537658046273625,
"min": 0.07611152607326706,
"max": 0.21537658046273625,
"count": 9
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.320098559999991e-06,
"min": 4.320098559999991e-06,
"max": 0.00026832001056,
"count": 9
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.320098559999991e-06,
"min": 4.320098559999991e-06,
"max": 0.00026832001056,
"count": 9
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10144,
"min": 0.10144,
"max": 0.18944000000000003,
"count": 9
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10144,
"min": 0.10144,
"max": 0.18944000000000003,
"count": 9
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.185599999999986e-05,
"min": 8.185599999999986e-05,
"max": 0.0044730559999999996,
"count": 9
},
"SnowballTarget.Policy.Beta.sum": {
"value": 8.185599999999986e-05,
"min": 8.185599999999986e-05,
"max": 0.0044730559999999996,
"count": 9
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 9
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 2189.0,
"min": 2189.0,
"max": 2189.0,
"count": 9
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 6.090909090909091,
"min": 2.727272727272727,
"max": 6.090909090909091,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 67.0,
"min": 3.0,
"max": 67.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 6.090909090909091,
"min": 2.727272727272727,
"max": 6.090909090909091,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 67.0,
"min": 3.0,
"max": 67.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748940998",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748941045"
},
"total": 47.74102103600001,
"count": 1,
"self": 0.43361917599997923,
"children": {
"run_training.setup": {
"total": 0.02250043599997298,
"count": 1,
"self": 0.02250043599997298
},
"TrainerController.start_learning": {
"total": 47.284901424000054,
"count": 1,
"self": 0.03384538699856421,
"children": {
"TrainerController._reset_env": {
"total": 3.1589053910000757,
"count": 1,
"self": 3.1589053910000757
},
"TrainerController.advance": {
"total": 43.96093021300146,
"count": 1864,
"self": 0.04282001900389787,
"children": {
"env_step": {
"total": 30.706137628001215,
"count": 1864,
"self": 23.254339909002965,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7.4313657899996315,
"count": 1864,
"self": 0.130267295002227,
"children": {
"TorchPolicy.evaluate": {
"total": 7.3010984949974045,
"count": 1864,
"self": 7.3010984949974045
}
}
},
"workers": {
"total": 0.020431928998618787,
"count": 1864,
"self": 0.0,
"children": {
"worker_root": {
"total": 46.787629155002605,
"count": 1864,
"is_parallel": true,
"self": 26.392337658007023,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004996192000021438,
"count": 1,
"is_parallel": true,
"self": 0.003429449000236673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015667429997847648,
"count": 10,
"is_parallel": true,
"self": 0.0015667429997847648
}
}
},
"UnityEnvironment.step": {
"total": 0.036637963999964995,
"count": 1,
"is_parallel": true,
"self": 0.0005665359999511566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004158599999755097,
"count": 1,
"is_parallel": true,
"self": 0.0004158599999755097
},
"communicator.exchange": {
"total": 0.03380221899999469,
"count": 1,
"is_parallel": true,
"self": 0.03380221899999469
},
"steps_from_proto": {
"total": 0.001853349000043636,
"count": 1,
"is_parallel": true,
"self": 0.0003634480000300755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014899010000135604,
"count": 10,
"is_parallel": true,
"self": 0.0014899010000135604
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 20.395291496995583,
"count": 1863,
"is_parallel": true,
"self": 0.9919147719912189,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5553709310017894,
"count": 1863,
"is_parallel": true,
"self": 0.5553709310017894
},
"communicator.exchange": {
"total": 15.64267938300236,
"count": 1863,
"is_parallel": true,
"self": 15.64267938300236
},
"steps_from_proto": {
"total": 3.2053264110002146,
"count": 1863,
"is_parallel": true,
"self": 0.5720865570042406,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.633239853995974,
"count": 18630,
"is_parallel": true,
"self": 2.633239853995974
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13.211972565996348,
"count": 1864,
"self": 0.045814624996637576,
"children": {
"process_trajectory": {
"total": 3.3185879899997417,
"count": 1864,
"self": 2.858603540999752,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4599844489999896,
"count": 4,
"self": 0.4599844489999896
}
}
},
"_update_policy": {
"total": 9.847569950999969,
"count": 9,
"self": 3.9125530290006054,
"children": {
"TorchPPOOptimizer.update": {
"total": 5.935016921999363,
"count": 456,
"self": 5.935016921999363
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2360000027911155e-06,
"count": 1,
"self": 1.2360000027911155e-06
},
"TrainerController._save_models": {
"total": 0.13121919699995033,
"count": 1,
"self": 0.0008648020000237011,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13035439499992663,
"count": 1,
"self": 0.13035439499992663
}
}
}
}
}
}
}