{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5598068237304688,
"min": 0.5344870090484619,
"max": 2.885925054550171,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5714.5078125,
"min": 5123.64404296875,
"max": 29459.5234375,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.575998306274414,
"min": 0.41134241223335266,
"max": 13.679601669311523,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2647.319580078125,
"min": 79.80043029785156,
"max": 2804.318359375,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06776734094969201,
"min": 0.055697942451932284,
"max": 0.07521760597535154,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27106936379876806,
"min": 0.22279176980772913,
"max": 0.3702068779019493,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19524452148699292,
"min": 0.09916690595826938,
"max": 0.2735037369032701,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7809780859479717,
"min": 0.3966676238330775,
"max": 1.3590929327057857,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.588995412000008e-07,
"min": 4.588995412000008e-07,
"max": 9.94588005412e-05,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.8355981648000033e-06,
"min": 1.8355981648000033e-06,
"max": 0.000492344007656,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10045880000000001,
"min": 0.10045880000000001,
"max": 0.1994588,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40183520000000006,
"min": 0.40183520000000006,
"max": 0.992344,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.289412000000004e-05,
"min": 3.289412000000004e-05,
"max": 0.00497299412,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00013157648000000017,
"min": 0.00013157648000000017,
"max": 0.0246179656,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.42222222222222,
"min": 3.1136363636363638,
"max": 26.954545454545453,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1189.0,
"min": 137.0,
"max": 1482.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.42222222222222,
"min": 3.1136363636363638,
"max": 26.954545454545453,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1189.0,
"min": 137.0,
"max": 1482.0,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739868912",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739870941"
},
"total": 2028.8912732499998,
"count": 1,
"self": 0.436763739999833,
"children": {
"run_training.setup": {
"total": 0.030824186000018017,
"count": 1,
"self": 0.030824186000018017
},
"TrainerController.start_learning": {
"total": 2028.423685324,
"count": 1,
"self": 1.4985105019934508,
"children": {
"TrainerController._reset_env": {
"total": 3.397486055000172,
"count": 1,
"self": 3.397486055000172
},
"TrainerController.advance": {
"total": 2023.4399491100064,
"count": 90928,
"self": 1.5637078050081072,
"children": {
"env_step": {
"total": 1421.4469410259328,
"count": 90928,
"self": 1077.191548297962,
"children": {
"SubprocessEnvManager._take_step": {
"total": 343.30088535295636,
"count": 90928,
"self": 5.769076685929349,
"children": {
"TorchPolicy.evaluate": {
"total": 337.531808667027,
"count": 90928,
"self": 337.531808667027
}
}
},
"workers": {
"total": 0.9545073750143729,
"count": 90928,
"self": 0.0,
"children": {
"worker_root": {
"total": 2022.8117548330454,
"count": 90928,
"is_parallel": true,
"self": 1074.5896754870018,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059209400001236645,
"count": 1,
"is_parallel": true,
"self": 0.004382140000188883,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015387999999347812,
"count": 10,
"is_parallel": true,
"self": 0.0015387999999347812
}
}
},
"UnityEnvironment.step": {
"total": 0.06223409900007937,
"count": 1,
"is_parallel": true,
"self": 0.0005929930002821493,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037452599985954294,
"count": 1,
"is_parallel": true,
"self": 0.00037452599985954294
},
"communicator.exchange": {
"total": 0.05925648299989916,
"count": 1,
"is_parallel": true,
"self": 0.05925648299989916
},
"steps_from_proto": {
"total": 0.002010097000038513,
"count": 1,
"is_parallel": true,
"self": 0.00036208200049259176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016480149995459215,
"count": 10,
"is_parallel": true,
"self": 0.0016480149995459215
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 948.2220793460435,
"count": 90927,
"is_parallel": true,
"self": 46.490490747924014,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.515987149997272,
"count": 90927,
"is_parallel": true,
"self": 26.515987149997272
},
"communicator.exchange": {
"total": 725.9183714160883,
"count": 90927,
"is_parallel": true,
"self": 725.9183714160883
},
"steps_from_proto": {
"total": 149.2972300320339,
"count": 90927,
"is_parallel": true,
"self": 25.973760463020426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 123.32346956901347,
"count": 909270,
"is_parallel": true,
"self": 123.32346956901347
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 600.4293002790655,
"count": 90928,
"self": 1.7623362780313983,
"children": {
"process_trajectory": {
"total": 128.07997309903453,
"count": 90928,
"self": 125.9892238680352,
"children": {
"RLTrainer._checkpoint": {
"total": 2.090749230999336,
"count": 20,
"self": 2.090749230999336
}
}
},
"_update_policy": {
"total": 470.58699090199957,
"count": 454,
"self": 185.04720331502358,
"children": {
"TorchPPOOptimizer.update": {
"total": 285.539787586976,
"count": 23151,
"self": 285.539787586976
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.990001904545352e-07,
"count": 1,
"self": 7.990001904545352e-07
},
"TrainerController._save_models": {
"total": 0.08773885799973868,
"count": 1,
"self": 0.0009371189999001217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08680173899983856,
"count": 1,
"self": 0.08680173899983856
}
}
}
}
}
}
}