{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8882731795310974,
"min": 0.8882731795310974,
"max": 2.8560502529144287,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8793.904296875,
"min": 8793.904296875,
"max": 29845.724609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199950.0,
"min": 9950.0,
"max": 199950.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199950.0,
"min": 9950.0,
"max": 199950.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.088932037353516,
"min": 0.2997612953186035,
"max": 13.088932037353516,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2617.786376953125,
"min": 59.65250015258789,
"max": 2617.786376953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.88,
"min": 3.0681818181818183,
"max": 25.88,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1294.0,
"min": 135.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.88,
"min": 3.0681818181818183,
"max": 25.88,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1294.0,
"min": 135.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06593555638684455,
"min": 0.06355253701445226,
"max": 0.07748623172841797,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2637422255473782,
"min": 0.26049270548266545,
"max": 0.3663019639443876,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2061760490577595,
"min": 0.11387265280035197,
"max": 0.29847624038948734,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.824704196231038,
"min": 0.4554906112014079,
"max": 1.4923812019474367,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.950097350000008e-06,
"min": 7.950097350000008e-06,
"max": 0.00029175000274999995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.180038940000003e-05,
"min": 3.180038940000003e-05,
"max": 0.0013845000385,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10265,
"min": 0.10265,
"max": 0.19725000000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4106,
"min": 0.4106,
"max": 0.9615,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014223500000000013,
"min": 0.00014223500000000013,
"max": 0.004862775,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005689400000000005,
"min": 0.0005689400000000005,
"max": 0.023078849999999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738029388",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738029812"
},
"total": 423.871274587,
"count": 1,
"self": 0.43790016099990225,
"children": {
"run_training.setup": {
"total": 0.02198202300007779,
"count": 1,
"self": 0.02198202300007779
},
"TrainerController.start_learning": {
"total": 423.41139240300004,
"count": 1,
"self": 0.3206611690096679,
"children": {
"TrainerController._reset_env": {
"total": 2.8801579729999958,
"count": 1,
"self": 2.8801579729999958
},
"TrainerController.advance": {
"total": 420.12302331999047,
"count": 18200,
"self": 0.3618016369802035,
"children": {
"env_step": {
"total": 295.2849689950011,
"count": 18200,
"self": 225.2303728090011,
"children": {
"SubprocessEnvManager._take_step": {
"total": 69.85859030899951,
"count": 18200,
"self": 1.2438960030023054,
"children": {
"TorchPolicy.evaluate": {
"total": 68.6146943059972,
"count": 18200,
"self": 68.6146943059972
}
}
},
"workers": {
"total": 0.1960058770005162,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 421.9896186689921,
"count": 18200,
"is_parallel": true,
"self": 224.52761052898916,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005155141999921398,
"count": 1,
"is_parallel": true,
"self": 0.003444188999992548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017109529999288497,
"count": 10,
"is_parallel": true,
"self": 0.0017109529999288497
}
}
},
"UnityEnvironment.step": {
"total": 0.057522064000067985,
"count": 1,
"is_parallel": true,
"self": 0.000770421000083843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041302799991171923,
"count": 1,
"is_parallel": true,
"self": 0.00041302799991171923
},
"communicator.exchange": {
"total": 0.054600244000084786,
"count": 1,
"is_parallel": true,
"self": 0.054600244000084786
},
"steps_from_proto": {
"total": 0.0017383709999876373,
"count": 1,
"is_parallel": true,
"self": 0.0003612299998394519,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013771410001481854,
"count": 10,
"is_parallel": true,
"self": 0.0013771410001481854
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 197.46200814000292,
"count": 18199,
"is_parallel": true,
"self": 9.57144913099205,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.318880147007235,
"count": 18199,
"is_parallel": true,
"self": 5.318880147007235
},
"communicator.exchange": {
"total": 151.71470592199682,
"count": 18199,
"is_parallel": true,
"self": 151.71470592199682
},
"steps_from_proto": {
"total": 30.856972940006813,
"count": 18199,
"is_parallel": true,
"self": 5.559763802011162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.29720913799565,
"count": 181990,
"is_parallel": true,
"self": 25.29720913799565
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 124.47625268800914,
"count": 18200,
"self": 0.4141615050106111,
"children": {
"process_trajectory": {
"total": 28.617787062998673,
"count": 18200,
"self": 28.097481462998644,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5203056000000288,
"count": 4,
"self": 0.5203056000000288
}
}
},
"_update_policy": {
"total": 95.44430411999986,
"count": 90,
"self": 38.89122391100511,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.55308020899474,
"count": 4590,
"self": 56.55308020899474
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0199998996540671e-06,
"count": 1,
"self": 1.0199998996540671e-06
},
"TrainerController._save_models": {
"total": 0.08754892100000689,
"count": 1,
"self": 0.0009609960000034334,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08658792500000345,
"count": 1,
"self": 0.08658792500000345
}
}
}
}
}
}
}