{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.7065010070800781,
"min": 1.7065010070800781,
"max": 2.888967752456665,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16218.5859375,
"min": 16218.5859375,
"max": 29610.72265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 7.674254417419434,
"min": 0.03053261525928974,
"max": 7.674254417419434,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1496.4796142578125,
"min": 5.923327445983887,
"max": 1499.783203125,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": 0.0971328467130661,
"min": -0.02227313071489334,
"max": 0.09728033095598221,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": 18.94090461730957,
"min": -4.320987224578857,
"max": 19.84518814086914,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.477272727272727,
"min": 2.340909090909091,
"max": 19.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 857.0,
"min": 103.0,
"max": 1065.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.477272727272727,
"min": 2.340909090909091,
"max": 19.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 857.0,
"min": 103.0,
"max": 1065.0,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 0.19292364044072616,
"min": 0.002482636821117591,
"max": 0.29165254319933326,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 8.48864017939195,
"min": 0.109236020129174,
"max": 16.04088987596333,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.035323625323180036,
"min": 0.028463529926596617,
"max": 0.04022655982978405,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.035323625323180036,
"min": 0.028463529926596617,
"max": 0.06431329781509132,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15221274950925043,
"min": 0.03654421540890254,
"max": 0.16514313922208898,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.15221274950925043,
"min": 0.03654421540890254,
"max": 0.2702475930838024,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.732096756000008e-06,
"min": 9.732096756000008e-06,
"max": 0.00028693200435600004,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.732096756000008e-06,
"min": 9.732096756000008e-06,
"max": 0.000375864074712,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.103244,
"min": 0.103244,
"max": 0.19564399999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.103244,
"min": 0.103244,
"max": 0.325288,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001718756000000001,
"min": 0.0001718756000000001,
"max": 0.0047826356000000006,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001718756000000001,
"min": 0.0001718756000000001,
"max": 0.006271871200000001,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.048212578921925785,
"min": 0.046647274537998086,
"max": 0.23560419240418604,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 0.048212578921925785,
"min": 0.046647274537998086,
"max": 0.23560419240418604,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 1.4837307976741416,
"min": 1.4837307976741416,
"max": 2.8772671877169143,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 1.4837307976741416,
"min": 1.4837307976741416,
"max": 4.69676098169065,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749399693",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749400152"
},
"total": 458.206854603,
"count": 1,
"self": 0.43880511700001534,
"children": {
"run_training.setup": {
"total": 0.030700682999963647,
"count": 1,
"self": 0.030700682999963647
},
"TrainerController.start_learning": {
"total": 457.73734880300003,
"count": 1,
"self": 0.4067580550084813,
"children": {
"TrainerController._reset_env": {
"total": 3.601653650000003,
"count": 1,
"self": 3.601653650000003
},
"TrainerController.advance": {
"total": 453.5602739069916,
"count": 18192,
"self": 0.42927635099442796,
"children": {
"env_step": {
"total": 328.4490970089953,
"count": 18192,
"self": 248.02014741500318,
"children": {
"SubprocessEnvManager._take_step": {
"total": 80.18965977799712,
"count": 18192,
"self": 1.3754845810066172,
"children": {
"TorchPolicy.evaluate": {
"total": 78.8141751969905,
"count": 18192,
"self": 78.8141751969905
}
}
},
"workers": {
"total": 0.23928981599499366,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 456.0009577290032,
"count": 18192,
"is_parallel": true,
"self": 239.22472135201838,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005424476000030154,
"count": 1,
"is_parallel": true,
"self": 0.003780675000029987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016438010000001668,
"count": 10,
"is_parallel": true,
"self": 0.0016438010000001668
}
}
},
"UnityEnvironment.step": {
"total": 0.03824852800005374,
"count": 1,
"is_parallel": true,
"self": 0.0005834369999320188,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047514600009890273,
"count": 1,
"is_parallel": true,
"self": 0.00047514600009890273
},
"communicator.exchange": {
"total": 0.03498555999999553,
"count": 1,
"is_parallel": true,
"self": 0.03498555999999553
},
"steps_from_proto": {
"total": 0.002204385000027287,
"count": 1,
"is_parallel": true,
"self": 0.0004050629999028388,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001799322000124448,
"count": 10,
"is_parallel": true,
"self": 0.001799322000124448
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 216.7762363769848,
"count": 18191,
"is_parallel": true,
"self": 10.13176079095706,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.904310303002148,
"count": 18191,
"is_parallel": true,
"self": 5.904310303002148
},
"communicator.exchange": {
"total": 166.5820600040056,
"count": 18191,
"is_parallel": true,
"self": 166.5820600040056
},
"steps_from_proto": {
"total": 34.15810527901999,
"count": 18191,
"is_parallel": true,
"self": 6.311975488088365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.846129790931627,
"count": 181910,
"is_parallel": true,
"self": 27.846129790931627
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 124.68190054700187,
"count": 18192,
"self": 0.5112059669888822,
"children": {
"process_trajectory": {
"total": 42.46871813801317,
"count": 18192,
"self": 41.67492808001316,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7937900580000132,
"count": 4,
"self": 0.7937900580000132
}
}
},
"_update_policy": {
"total": 81.70197644199982,
"count": 22,
"self": 61.15012646799778,
"children": {
"TorchPPOOptimizer.update": {
"total": 20.55184997400204,
"count": 1122,
"self": 20.55184997400204
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.649999472254422e-07,
"count": 1,
"self": 8.649999472254422e-07
},
"TrainerController._save_models": {
"total": 0.1686623260000033,
"count": 1,
"self": 0.00204728900007467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16661503699992863,
"count": 1,
"self": 0.16661503699992863
}
}
}
}
}
}
}