{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9414846897125244,
"min": 0.9414846897125244,
"max": 2.837233066558838,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9610.67578125,
"min": 9185.076171875,
"max": 28962.474609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.867913246154785,
"min": 0.2730344235897064,
"max": 12.867913246154785,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2509.2431640625,
"min": 52.422607421875,
"max": 2615.4228515625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06885125869245944,
"min": 0.064106787175771,
"max": 0.07653272599461214,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27540503476983774,
"min": 0.25653607786436766,
"max": 0.36889627000246217,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21730695978975764,
"min": 0.13253694556781329,
"max": 0.3082595127470353,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8692278391590306,
"min": 0.5301477822712531,
"max": 1.5412975637351765,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.890097370000006e-06,
"min": 7.890097370000006e-06,
"max": 0.00029169000276999994,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.1560389480000026e-05,
"min": 3.1560389480000026e-05,
"max": 0.0013842000385999999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10263000000000001,
"min": 0.10263000000000001,
"max": 0.19723000000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41052000000000005,
"min": 0.41052000000000005,
"max": 0.9614,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014123700000000008,
"min": 0.00014123700000000008,
"max": 0.004861776999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005649480000000003,
"min": 0.0005649480000000003,
"max": 0.023073859999999995,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.227272727272727,
"min": 3.6136363636363638,
"max": 25.581818181818182,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1110.0,
"min": 159.0,
"max": 1407.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.227272727272727,
"min": 3.6136363636363638,
"max": 25.581818181818182,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1110.0,
"min": 159.0,
"max": 1407.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733052424",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/zhangsz/anaconda3/envs/deep_rl_course/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1733052763"
},
"total": 338.330125186003,
"count": 1,
"self": 0.21966511600476224,
"children": {
"run_training.setup": {
"total": 0.010893323000345845,
"count": 1,
"self": 0.010893323000345845
},
"TrainerController.start_learning": {
"total": 338.0995667469979,
"count": 1,
"self": 0.27753141995708575,
"children": {
"TrainerController._reset_env": {
"total": 1.303241983998305,
"count": 1,
"self": 1.303241983998305
},
"TrainerController.advance": {
"total": 336.46508998504214,
"count": 18192,
"self": 0.26074503542258753,
"children": {
"env_step": {
"total": 244.41640083280436,
"count": 18192,
"self": 171.80774939095863,
"children": {
"SubprocessEnvManager._take_step": {
"total": 72.42590474493409,
"count": 18192,
"self": 0.8186111925533623,
"children": {
"TorchPolicy.evaluate": {
"total": 71.60729355238072,
"count": 18192,
"self": 71.60729355238072
}
}
},
"workers": {
"total": 0.1827466969116358,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 337.2901172958118,
"count": 18192,
"is_parallel": true,
"self": 181.64054854560527,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010098610000568442,
"count": 1,
"is_parallel": true,
"self": 0.000308087001030799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007017739990260452,
"count": 10,
"is_parallel": true,
"self": 0.0007017739990260452
}
}
},
"UnityEnvironment.step": {
"total": 0.01905548100330634,
"count": 1,
"is_parallel": true,
"self": 0.00018817700401996262,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001389070021104999,
"count": 1,
"is_parallel": true,
"self": 0.0001389070021104999
},
"communicator.exchange": {
"total": 0.018159219998779008,
"count": 1,
"is_parallel": true,
"self": 0.018159219998779008
},
"steps_from_proto": {
"total": 0.0005691769983968697,
"count": 1,
"is_parallel": true,
"self": 0.00013573500109487213,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004334419973019976,
"count": 10,
"is_parallel": true,
"self": 0.0004334419973019976
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 155.64956875020653,
"count": 18191,
"is_parallel": true,
"self": 3.1611836370902893,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.8261456271029601,
"count": 18191,
"is_parallel": true,
"self": 1.8261456271029601
},
"communicator.exchange": {
"total": 141.4710012380965,
"count": 18191,
"is_parallel": true,
"self": 141.4710012380965
},
"steps_from_proto": {
"total": 9.191238247916772,
"count": 18191,
"is_parallel": true,
"self": 2.1033415743513615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.08789667356541,
"count": 181910,
"is_parallel": true,
"self": 7.08789667356541
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 91.7879441168152,
"count": 18192,
"self": 0.33735308186805923,
"children": {
"process_trajectory": {
"total": 20.096012293954118,
"count": 18192,
"self": 19.86077834495154,
"children": {
"RLTrainer._checkpoint": {
"total": 0.235233949002577,
"count": 4,
"self": 0.235233949002577
}
}
},
"_update_policy": {
"total": 71.35457874099302,
"count": 90,
"self": 21.752368117038714,
"children": {
"TorchPPOOptimizer.update": {
"total": 49.602210623954306,
"count": 4587,
"self": 49.602210623954306
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.5299824452959e-07,
"count": 1,
"self": 5.5299824452959e-07
},
"TrainerController._save_models": {
"total": 0.05370280500210356,
"count": 1,
"self": 0.0005373090025386773,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05316549599956488,
"count": 1,
"self": 0.05316549599956488
}
}
}
}
}
}
}