{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.757451593875885,
"min": 0.7324873208999634,
"max": 0.8686289191246033,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7782.0576171875,
"min": 7090.4775390625,
"max": 8817.8564453125,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 299944.0,
"min": 209936.0,
"max": 299944.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 299944.0,
"min": 209936.0,
"max": 299944.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.217594146728516,
"min": 13.026618957519531,
"max": 13.375056266784668,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2696.38916015625,
"min": 2501.11083984375,
"max": 2728.511474609375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06772717904052972,
"min": 0.060659944293872156,
"max": 0.07285553590650214,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3386358952026486,
"min": 0.2508493969935979,
"max": 0.36427767953251067,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19245753621353823,
"min": 0.18801876113695254,
"max": 0.23928365085328768,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9622876810676911,
"min": 0.7782216938395126,
"max": 1.0662853764552696,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.3760982079999995e-06,
"min": 5.3760982079999995e-06,
"max": 9.4476068508e-05,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.688049104e-05,
"min": 2.688049104e-05,
"max": 0.0004228803590399999,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10179200000000002,
"min": 0.10179200000000002,
"max": 0.131492,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5089600000000001,
"min": 0.420368,
"max": 0.64096,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.942080000000004e-05,
"min": 9.942080000000004e-05,
"max": 0.0015814508000000001,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004971040000000002,
"min": 0.0004971040000000002,
"max": 0.007083904000000002,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.12727272727273,
"min": 25.10909090909091,
"max": 26.254545454545454,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1437.0,
"min": 1121.0,
"max": 1444.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.12727272727273,
"min": 25.10909090909091,
"max": 26.254545454545454,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1437.0,
"min": 1121.0,
"max": 1444.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711384468",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --resume --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711384726"
},
"total": 257.6430599260002,
"count": 1,
"self": 0.4349406720002662,
"children": {
"run_training.setup": {
"total": 0.05437111800006278,
"count": 1,
"self": 0.05437111800006278
},
"TrainerController.start_learning": {
"total": 257.1537481359999,
"count": 1,
"self": 0.41686792500877345,
"children": {
"TrainerController._reset_env": {
"total": 1.9647755010000765,
"count": 1,
"self": 1.9647755010000765
},
"TrainerController.advance": {
"total": 254.67410649799126,
"count": 9133,
"self": 0.1751358019878353,
"children": {
"env_step": {
"total": 254.49897069600343,
"count": 9133,
"self": 165.55945719699912,
"children": {
"SubprocessEnvManager._take_step": {
"total": 88.76162521399692,
"count": 9133,
"self": 0.9026049529859392,
"children": {
"TorchPolicy.evaluate": {
"total": 87.85902026101098,
"count": 9133,
"self": 87.85902026101098
}
}
},
"workers": {
"total": 0.17788828500738418,
"count": 9133,
"self": 0.0,
"children": {
"worker_root": {
"total": 256.34635996199063,
"count": 9133,
"is_parallel": true,
"self": 126.36897504398553,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024461640000481566,
"count": 1,
"is_parallel": true,
"self": 0.0006933590000244294,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017528050000237272,
"count": 10,
"is_parallel": true,
"self": 0.0017528050000237272
}
}
},
"UnityEnvironment.step": {
"total": 0.038990054000123564,
"count": 1,
"is_parallel": true,
"self": 0.0008530089999112533,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045544600016000913,
"count": 1,
"is_parallel": true,
"self": 0.00045544600016000913
},
"communicator.exchange": {
"total": 0.03549220900004002,
"count": 1,
"is_parallel": true,
"self": 0.03549220900004002
},
"steps_from_proto": {
"total": 0.0021893900000122812,
"count": 1,
"is_parallel": true,
"self": 0.0004315349995067663,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001757855000505515,
"count": 10,
"is_parallel": true,
"self": 0.001757855000505515
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 129.9773849180051,
"count": 9132,
"is_parallel": true,
"self": 5.860590331981712,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.1413840949933274,
"count": 9132,
"is_parallel": true,
"self": 3.1413840949933274
},
"communicator.exchange": {
"total": 101.34071565402132,
"count": 9132,
"is_parallel": true,
"self": 101.34071565402132
},
"steps_from_proto": {
"total": 19.634694837008738,
"count": 9132,
"is_parallel": true,
"self": 3.828898483987814,
"children": {
"_process_rank_one_or_two_observation": {
"total": 15.805796353020924,
"count": 91320,
"is_parallel": true,
"self": 15.805796353020924
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0004231239997807279,
"count": 1,
"self": 0.0004231239997807279,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 251.36399706591033,
"count": 386849,
"is_parallel": true,
"self": 8.726530653870896,
"children": {
"process_trajectory": {
"total": 139.703411889039,
"count": 386849,
"is_parallel": true,
"self": 139.3624208940389,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3409909950000838,
"count": 2,
"is_parallel": true,
"self": 0.3409909950000838
}
}
},
"_update_policy": {
"total": 102.93405452300044,
"count": 45,
"is_parallel": true,
"self": 27.854383290995884,
"children": {
"TorchPPOOptimizer.update": {
"total": 75.07967123200456,
"count": 2292,
"is_parallel": true,
"self": 75.07967123200456
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09757508799998504,
"count": 1,
"self": 0.0012657869999657123,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09630930100001933,
"count": 1,
"self": 0.09630930100001933
}
}
}
}
}
}
}