{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2522377967834473,
"min": 1.2522377967834473,
"max": 2.8821299076080322,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11308.9599609375,
"min": 11308.9599609375,
"max": 31982.77734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1435742378234863,
"min": 0.10417458415031433,
"max": 2.1435742378234863,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 216.50099182128906,
"min": 10.104934692382812,
"max": 216.50099182128906,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.307692307692307,
"min": 2.977272727272727,
"max": 21.307692307692307,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1108.0,
"min": 131.0,
"max": 1108.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.307692307692307,
"min": 2.977272727272727,
"max": 21.307692307692307,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1108.0,
"min": 131.0,
"max": 1108.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06618703596011577,
"min": 0.054635919836054925,
"max": 0.0682054325252775,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.1985611078803473,
"min": 0.12489188359280098,
"max": 0.27282173010111,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.13462365406171306,
"min": 0.05625650809720309,
"max": 0.16300076826587404,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4038709621851392,
"min": 0.11251301619440618,
"max": 0.6218658217075296,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.9960980026666655e-06,
"min": 2.9960980026666655e-06,
"max": 0.000146172002552,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.988294007999996e-06,
"min": 8.988294007999996e-06,
"max": 0.000554988030008,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10199733333333331,
"min": 0.10199733333333331,
"max": 0.197448,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.30599199999999993,
"min": 0.2650960000000001,
"max": 0.7699920000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010966693333333328,
"min": 0.00010966693333333328,
"max": 0.0048726552,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00032900079999999987,
"min": 0.00032900079999999987,
"max": 0.018502600799999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679689232",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679689669"
},
"total": 436.558251318,
"count": 1,
"self": 0.7601629530000764,
"children": {
"run_training.setup": {
"total": 0.10543423999990864,
"count": 1,
"self": 0.10543423999990864
},
"TrainerController.start_learning": {
"total": 435.692654125,
"count": 1,
"self": 0.5272435969986873,
"children": {
"TrainerController._reset_env": {
"total": 9.696206233999987,
"count": 1,
"self": 9.696206233999987
},
"TrainerController.advance": {
"total": 425.2349809660012,
"count": 18313,
"self": 0.2725053039946488,
"children": {
"env_step": {
"total": 424.96247566200657,
"count": 18313,
"self": 282.372781718007,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.3326968929947,
"count": 18313,
"self": 2.0609900819970335,
"children": {
"TorchPolicy.evaluate": {
"total": 140.27170681099767,
"count": 18313,
"self": 140.27170681099767
}
}
},
"workers": {
"total": 0.25699705100487336,
"count": 18313,
"self": 0.0,
"children": {
"worker_root": {
"total": 434.1261423409959,
"count": 18313,
"is_parallel": true,
"self": 200.96900196399963,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0057078970000929985,
"count": 1,
"is_parallel": true,
"self": 0.004249718999858487,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014581780002345113,
"count": 10,
"is_parallel": true,
"self": 0.0014581780002345113
}
}
},
"UnityEnvironment.step": {
"total": 0.04610757400007515,
"count": 1,
"is_parallel": true,
"self": 0.0005560160000186443,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004055839999637101,
"count": 1,
"is_parallel": true,
"self": 0.0004055839999637101
},
"communicator.exchange": {
"total": 0.04320131700001184,
"count": 1,
"is_parallel": true,
"self": 0.04320131700001184
},
"steps_from_proto": {
"total": 0.0019446570000809515,
"count": 1,
"is_parallel": true,
"self": 0.00039157999992767145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00155307700015328,
"count": 10,
"is_parallel": true,
"self": 0.00155307700015328
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 233.15714037699627,
"count": 18312,
"is_parallel": true,
"self": 9.37473942000122,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.014170033005598,
"count": 18312,
"is_parallel": true,
"self": 5.014170033005598
},
"communicator.exchange": {
"total": 188.53709181500165,
"count": 18312,
"is_parallel": true,
"self": 188.53709181500165
},
"steps_from_proto": {
"total": 30.231139108987804,
"count": 18312,
"is_parallel": true,
"self": 5.965508136972744,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.26563097201506,
"count": 183120,
"is_parallel": true,
"self": 24.26563097201506
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00026284200009740744,
"count": 1,
"self": 0.00026284200009740744,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 422.95301395200556,
"count": 293222,
"is_parallel": true,
"self": 7.127041561964688,
"children": {
"process_trajectory": {
"total": 182.04697437204095,
"count": 293222,
"is_parallel": true,
"self": 180.96902473204102,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0779496399999289,
"count": 4,
"is_parallel": true,
"self": 1.0779496399999289
}
}
},
"_update_policy": {
"total": 233.77899801799992,
"count": 62,
"is_parallel": true,
"self": 72.19092925399832,
"children": {
"TorchPPOOptimizer.update": {
"total": 161.5880687640016,
"count": 3636,
"is_parallel": true,
"self": 161.5880687640016
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23396048600000086,
"count": 1,
"self": 0.0014364419999992606,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2325240440000016,
"count": 1,
"self": 0.2325240440000016
}
}
}
}
}
}
}