{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5254992246627808,
"min": 0.5015815496444702,
"max": 0.7530991435050964,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5040.58837890625,
"min": 4894.92529296875,
"max": 7669.703125,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.367341041564941,
"min": 12.085987091064453,
"max": 13.57891845703125,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2726.9375,
"min": 2320.509521484375,
"max": 2764.7763671875,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07120463558519488,
"min": 0.058896382402499683,
"max": 0.07685730606210521,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3560231779259744,
"min": 0.23558552960999873,
"max": 0.3651217979717972,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18280066923767913,
"min": 0.15904425190506027,
"max": 0.20083134256157223,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9140033461883956,
"min": 0.6361770076202411,
"max": 0.9642324411109382,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.1056989647999945e-06,
"min": 3.1056989647999945e-06,
"max": 0.00017668564110480002,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5528494823999972e-05,
"min": 1.5528494823999972e-05,
"max": 0.000853728215424,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10103520000000002,
"min": 0.10103520000000002,
"max": 0.15889520000000001,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5051760000000001,
"min": 0.4120608,
"max": 0.7845760000000002,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.165647999999992e-05,
"min": 6.165647999999992e-05,
"max": 0.002948870480000001,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003082823999999996,
"min": 0.0003082823999999996,
"max": 0.0142503424,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.254545454545454,
"min": 24.15909090909091,
"max": 26.70909090909091,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1444.0,
"min": 1063.0,
"max": 1469.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.254545454545454,
"min": 24.15909090909091,
"max": 26.70909090909091,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1444.0,
"min": 1063.0,
"max": 1469.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744794992",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744795606"
},
"total": 613.9766473329998,
"count": 1,
"self": 0.42636831499976324,
"children": {
"run_training.setup": {
"total": 0.020260753000002296,
"count": 1,
"self": 0.020260753000002296
},
"TrainerController.start_learning": {
"total": 613.5300182650001,
"count": 1,
"self": 0.4642594350075342,
"children": {
"TrainerController._reset_env": {
"total": 2.0167149079998126,
"count": 1,
"self": 2.0167149079998126
},
"TrainerController.advance": {
"total": 610.9616803559927,
"count": 27264,
"self": 0.48663881705647327,
"children": {
"env_step": {
"total": 430.33856703599736,
"count": 27264,
"self": 328.26124668298166,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.78582413599838,
"count": 27264,
"self": 1.8210247900153718,
"children": {
"TorchPolicy.evaluate": {
"total": 99.96479934598301,
"count": 27264,
"self": 99.96479934598301
}
}
},
"workers": {
"total": 0.2914962170173112,
"count": 27264,
"self": 0.0,
"children": {
"worker_root": {
"total": 611.5841291410438,
"count": 27264,
"is_parallel": true,
"self": 322.83378687806794,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021035359998222702,
"count": 1,
"is_parallel": true,
"self": 0.0006520209999507642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001451514999871506,
"count": 10,
"is_parallel": true,
"self": 0.001451514999871506
}
}
},
"UnityEnvironment.step": {
"total": 0.03561748800007081,
"count": 1,
"is_parallel": true,
"self": 0.0005518640000445885,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037792900002386887,
"count": 1,
"is_parallel": true,
"self": 0.00037792900002386887
},
"communicator.exchange": {
"total": 0.03293755599997894,
"count": 1,
"is_parallel": true,
"self": 0.03293755599997894
},
"steps_from_proto": {
"total": 0.0017501390000234096,
"count": 1,
"is_parallel": true,
"self": 0.00032810400057314837,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014220349994502612,
"count": 10,
"is_parallel": true,
"self": 0.0014220349994502612
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 288.7503422629759,
"count": 27263,
"is_parallel": true,
"self": 13.965315862040825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.04956736399049,
"count": 27263,
"is_parallel": true,
"self": 8.04956736399049
},
"communicator.exchange": {
"total": 221.67666052095365,
"count": 27263,
"is_parallel": true,
"self": 221.67666052095365
},
"steps_from_proto": {
"total": 45.05879851599093,
"count": 27263,
"is_parallel": true,
"self": 7.833092072944737,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.22570644304619,
"count": 272630,
"is_parallel": true,
"self": 37.22570644304619
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 180.13647450293888,
"count": 27264,
"self": 0.5512165378934242,
"children": {
"process_trajectory": {
"total": 38.8073247140469,
"count": 27264,
"self": 38.23470988604777,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5726148279991321,
"count": 6,
"self": 0.5726148279991321
}
}
},
"_update_policy": {
"total": 140.77793325099856,
"count": 136,
"self": 56.05927576300928,
"children": {
"TorchPPOOptimizer.update": {
"total": 84.71865748798928,
"count": 6933,
"self": 84.71865748798928
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.235000127053354e-06,
"count": 1,
"self": 1.235000127053354e-06
},
"TrainerController._save_models": {
"total": 0.08736233099989477,
"count": 1,
"self": 0.0010968009996759065,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08626553000021886,
"count": 1,
"self": 0.08626553000021886
}
}
}
}
}
}
}