{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6852125525474548,
"min": 0.6852125525474548,
"max": 2.8606576919555664,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6663.0068359375,
"min": 6663.0068359375,
"max": 29295.99609375,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.84305191040039,
"min": 0.5147283673286438,
"max": 13.843694686889648,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2837.82568359375,
"min": 99.85730743408203,
"max": 2837.82568359375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06337864182104229,
"min": 0.058124806985028504,
"max": 0.07526857352943396,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.31689320910521146,
"min": 0.23249922794011402,
"max": 0.36515579806234433,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17442244536736434,
"min": 0.10995531964170582,
"max": 0.2854801709161085,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8721122268368217,
"min": 0.4398212785668233,
"max": 1.4274008545805426,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.088098982400003e-06,
"min": 5.088098982400003e-06,
"max": 0.0004945880010823999,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.5440494912000017e-05,
"min": 2.5440494912000017e-05,
"max": 0.0024234400153119997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.09090909090909,
"min": 3.0454545454545454,
"max": 27.636363636363637,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1490.0,
"min": 134.0,
"max": 1498.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.09090909090909,
"min": 3.0454545454545454,
"max": 27.636363636363637,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1490.0,
"min": 134.0,
"max": 1498.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676225327",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676226497"
},
"total": 1170.1716283580004,
"count": 1,
"self": 0.6061936420003349,
"children": {
"run_training.setup": {
"total": 0.11607234600000993,
"count": 1,
"self": 0.11607234600000993
},
"TrainerController.start_learning": {
"total": 1169.44936237,
"count": 1,
"self": 1.3398824320449876,
"children": {
"TrainerController._reset_env": {
"total": 9.80446770200001,
"count": 1,
"self": 9.80446770200001
},
"TrainerController.advance": {
"total": 1158.115022670955,
"count": 45497,
"self": 0.7122245829370968,
"children": {
"env_step": {
"total": 1157.402798088018,
"count": 45497,
"self": 796.8833919799638,
"children": {
"SubprocessEnvManager._take_step": {
"total": 359.8396147690196,
"count": 45497,
"self": 3.8816991270085737,
"children": {
"TorchPolicy.evaluate": {
"total": 355.957915642011,
"count": 45497,
"self": 77.52140897604261,
"children": {
"TorchPolicy.sample_actions": {
"total": 278.4365066659684,
"count": 45497,
"self": 278.4365066659684
}
}
}
}
},
"workers": {
"total": 0.6797913390346366,
"count": 45497,
"self": 0.0,
"children": {
"worker_root": {
"total": 1165.0870669709677,
"count": 45497,
"is_parallel": true,
"self": 558.4806853059888,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008175265000090803,
"count": 1,
"is_parallel": true,
"self": 0.0035110280000481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004664237000042704,
"count": 10,
"is_parallel": true,
"self": 0.004664237000042704
}
}
},
"UnityEnvironment.step": {
"total": 0.10833077699999194,
"count": 1,
"is_parallel": true,
"self": 0.0005486400000336289,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004020760000003065,
"count": 1,
"is_parallel": true,
"self": 0.0004020760000003065
},
"communicator.exchange": {
"total": 0.10552545799998825,
"count": 1,
"is_parallel": true,
"self": 0.10552545799998825
},
"steps_from_proto": {
"total": 0.0018546029999697566,
"count": 1,
"is_parallel": true,
"self": 0.00042234299996835034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014322600000014063,
"count": 10,
"is_parallel": true,
"self": 0.0014322600000014063
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 606.6063816649789,
"count": 45496,
"is_parallel": true,
"self": 23.968145582886564,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.20084729403436,
"count": 45496,
"is_parallel": true,
"self": 13.20084729403436
},
"communicator.exchange": {
"total": 481.67963296601124,
"count": 45496,
"is_parallel": true,
"self": 481.67963296601124
},
"steps_from_proto": {
"total": 87.75775582204676,
"count": 45496,
"is_parallel": true,
"self": 18.229814229059002,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.52794159298776,
"count": 454960,
"is_parallel": true,
"self": 69.52794159298776
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001272499998776766,
"count": 1,
"self": 0.0001272499998776766,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1149.589102287021,
"count": 1022351,
"is_parallel": true,
"self": 25.486396508045118,
"children": {
"process_trajectory": {
"total": 654.4495150679761,
"count": 1022351,
"is_parallel": true,
"self": 652.074534450976,
"children": {
"RLTrainer._checkpoint": {
"total": 2.3749806170001193,
"count": 10,
"is_parallel": true,
"self": 2.3749806170001193
}
}
},
"_update_policy": {
"total": 469.65319071099975,
"count": 227,
"is_parallel": true,
"self": 161.48679543399953,
"children": {
"TorchPPOOptimizer.update": {
"total": 308.1663952770002,
"count": 11574,
"is_parallel": true,
"self": 308.1663952770002
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1898623150000276,
"count": 1,
"self": 0.0011901150000994676,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18867219999992813,
"count": 1,
"self": 0.18867219999992813
}
}
}
}
}
}
}