{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8955487012863159,
"min": 0.8948118686676025,
"max": 2.853564739227295,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9053.1015625,
"min": 8701.150390625,
"max": 29223.357421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 399968.0,
"min": 209936.0,
"max": 399968.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 399968.0,
"min": 209936.0,
"max": 399968.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.22802734375,
"min": 0.2574637532234192,
"max": 13.2938232421875,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2579.46533203125,
"min": 49.433040618896484,
"max": 2711.93994140625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07011461774013963,
"min": 0.06358130356124556,
"max": 0.07595940214380914,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28045847096055854,
"min": 0.2543252142449822,
"max": 0.3797970107190457,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21542924686389808,
"min": 0.1385706035470517,
"max": 0.30058827799032717,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8617169874555923,
"min": 0.5542824141882068,
"max": 1.391813202231538,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.957098681000002e-06,
"min": 3.957098681000002e-06,
"max": 0.000145857051381,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.582839472400001e-05,
"min": 1.582839472400001e-05,
"max": 0.00069216026928,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101319,
"min": 0.101319,
"max": 0.14861900000000003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.405276,
"min": 0.405276,
"max": 0.73072,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.581810000000006e-05,
"min": 7.581810000000006e-05,
"max": 0.0024360881000000003,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00030327240000000024,
"min": 0.00030327240000000024,
"max": 0.011562928000000002,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 3.6363636363636362,
"max": 25.98181818181818,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 160.0,
"max": 1429.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 3.6363636363636362,
"max": 25.98181818181818,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 160.0,
"max": 1429.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706586650",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706587162"
},
"total": 512.0949007670001,
"count": 1,
"self": 0.47622578300024543,
"children": {
"run_training.setup": {
"total": 0.0539380440000059,
"count": 1,
"self": 0.0539380440000059
},
"TrainerController.start_learning": {
"total": 511.5647369399999,
"count": 1,
"self": 0.7514956730024096,
"children": {
"TrainerController._reset_env": {
"total": 2.3224954419999904,
"count": 1,
"self": 2.3224954419999904
},
"TrainerController.advance": {
"total": 508.35818274799726,
"count": 18199,
"self": 0.35644740900397665,
"children": {
"env_step": {
"total": 508.0017353389933,
"count": 18199,
"self": 331.5146749430048,
"children": {
"SubprocessEnvManager._take_step": {
"total": 176.1266867020031,
"count": 18199,
"self": 1.6763091989912482,
"children": {
"TorchPolicy.evaluate": {
"total": 174.45037750301185,
"count": 18199,
"self": 174.45037750301185
}
}
},
"workers": {
"total": 0.3603736939853661,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 509.99262467098515,
"count": 18199,
"is_parallel": true,
"self": 255.9905113189609,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00214061299993773,
"count": 1,
"is_parallel": true,
"self": 0.0006248759998470632,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015157370000906667,
"count": 10,
"is_parallel": true,
"self": 0.0015157370000906667
}
}
},
"UnityEnvironment.step": {
"total": 0.03927482999984022,
"count": 1,
"is_parallel": true,
"self": 0.000713487999746576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043658399999912945,
"count": 1,
"is_parallel": true,
"self": 0.00043658399999912945
},
"communicator.exchange": {
"total": 0.03600957499998003,
"count": 1,
"is_parallel": true,
"self": 0.03600957499998003
},
"steps_from_proto": {
"total": 0.0021151830001144845,
"count": 1,
"is_parallel": true,
"self": 0.00043435500015220896,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016808279999622755,
"count": 10,
"is_parallel": true,
"self": 0.0016808279999622755
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 254.00211335202425,
"count": 18198,
"is_parallel": true,
"self": 11.615472576003185,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.810355305988878,
"count": 18198,
"is_parallel": true,
"self": 5.810355305988878
},
"communicator.exchange": {
"total": 199.92710642801785,
"count": 18198,
"is_parallel": true,
"self": 199.92710642801785
},
"steps_from_proto": {
"total": 36.64917904201434,
"count": 18198,
"is_parallel": true,
"self": 7.209387495973033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.439791546041306,
"count": 181980,
"is_parallel": true,
"self": 29.439791546041306
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002489560001777136,
"count": 1,
"self": 0.0002489560001777136,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 502.01383666296533,
"count": 762377,
"is_parallel": true,
"self": 16.35181663889466,
"children": {
"process_trajectory": {
"total": 274.5566146790702,
"count": 762377,
"is_parallel": true,
"self": 273.66009452307003,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8965201560001788,
"count": 4,
"is_parallel": true,
"self": 0.8965201560001788
}
}
},
"_update_policy": {
"total": 211.10540534500046,
"count": 90,
"is_parallel": true,
"self": 60.61084359299434,
"children": {
"TorchPPOOptimizer.update": {
"total": 150.49456175200612,
"count": 4587,
"is_parallel": true,
"self": 150.49456175200612
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1323141210000358,
"count": 1,
"self": 0.002316863000032754,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12999725800000306,
"count": 1,
"self": 0.12999725800000306
}
}
}
}
}
}
}