{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8026812076568604,
"min": 0.7886124849319458,
"max": 2.8343505859375,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7628.68212890625,
"min": 7628.68212890625,
"max": 28933.05078125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.780665397644043,
"min": 0.3380882441997528,
"max": 12.865677833557129,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2492.229736328125,
"min": 65.58911895751953,
"max": 2637.4638671875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06731333080068569,
"min": 0.06384618304950622,
"max": 0.07343111980989503,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26925332320274276,
"min": 0.2609081270324988,
"max": 0.36715559904947515,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1947855675848676,
"min": 0.11319519553502438,
"max": 0.2839653474443099,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7791422703394704,
"min": 0.45278078214009754,
"max": 1.4144159578809552,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.954545454545453,
"min": 3.3636363636363638,
"max": 25.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1098.0,
"min": 148.0,
"max": 1382.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.954545454545453,
"min": 3.3636363636363638,
"max": 25.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1098.0,
"min": 148.0,
"max": 1382.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736923599",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736924015"
},
"total": 415.4360149700001,
"count": 1,
"self": 0.425387591000117,
"children": {
"run_training.setup": {
"total": 0.0766957010000624,
"count": 1,
"self": 0.0766957010000624
},
"TrainerController.start_learning": {
"total": 414.9339316779999,
"count": 1,
"self": 0.342340035006373,
"children": {
"TrainerController._reset_env": {
"total": 2.454583337000031,
"count": 1,
"self": 2.454583337000031
},
"TrainerController.advance": {
"total": 412.0546830819935,
"count": 18192,
"self": 0.3424097819955705,
"children": {
"env_step": {
"total": 290.814600488006,
"count": 18192,
"self": 222.95925636501363,
"children": {
"SubprocessEnvManager._take_step": {
"total": 67.65334099101062,
"count": 18192,
"self": 1.2052211000086572,
"children": {
"TorchPolicy.evaluate": {
"total": 66.44811989100197,
"count": 18192,
"self": 66.44811989100197
}
}
},
"workers": {
"total": 0.202003131981769,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 413.64994846900936,
"count": 18192,
"is_parallel": true,
"self": 218.2215751020036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003031073999977707,
"count": 1,
"is_parallel": true,
"self": 0.0008572799999910785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021737939999866285,
"count": 10,
"is_parallel": true,
"self": 0.0021737939999866285
}
}
},
"UnityEnvironment.step": {
"total": 0.03793781599995327,
"count": 1,
"is_parallel": true,
"self": 0.0006260930000507869,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043838199997026095,
"count": 1,
"is_parallel": true,
"self": 0.00043838199997026095
},
"communicator.exchange": {
"total": 0.034911469000007855,
"count": 1,
"is_parallel": true,
"self": 0.034911469000007855
},
"steps_from_proto": {
"total": 0.0019618719999243694,
"count": 1,
"is_parallel": true,
"self": 0.0003965650000736787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015653069998506908,
"count": 10,
"is_parallel": true,
"self": 0.0015653069998506908
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 195.42837336700575,
"count": 18191,
"is_parallel": true,
"self": 9.751071055995453,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.390594866007518,
"count": 18191,
"is_parallel": true,
"self": 5.390594866007518
},
"communicator.exchange": {
"total": 149.0266754119931,
"count": 18191,
"is_parallel": true,
"self": 149.0266754119931
},
"steps_from_proto": {
"total": 31.26003203300968,
"count": 18191,
"is_parallel": true,
"self": 5.614205618008782,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.645826415000897,
"count": 181910,
"is_parallel": true,
"self": 25.645826415000897
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 120.89767281199192,
"count": 18192,
"self": 0.3761557389846075,
"children": {
"process_trajectory": {
"total": 28.51204260400766,
"count": 18192,
"self": 28.094497937007986,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4175446669996745,
"count": 4,
"self": 0.4175446669996745
}
}
},
"_update_policy": {
"total": 92.00947446899966,
"count": 90,
"self": 38.03791924199925,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.97155522700041,
"count": 4587,
"self": 53.97155522700041
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.48999968386488e-07,
"count": 1,
"self": 9.48999968386488e-07
},
"TrainerController._save_models": {
"total": 0.08232427500001904,
"count": 1,
"self": 0.0008854110001266235,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08143886399989242,
"count": 1,
"self": 0.08143886399989242
}
}
}
}
}
}
}