{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.2936456203460693,
"min": 2.2936456203460693,
"max": 2.866025924682617,
"count": 6
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 22152.029296875,
"min": 22152.029296875,
"max": 29350.97265625,
"count": 6
},
"SnowballTarget.Step.mean": {
"value": 59976.0,
"min": 9952.0,
"max": 59976.0,
"count": 6
},
"SnowballTarget.Step.sum": {
"value": 59976.0,
"min": 9952.0,
"max": 59976.0,
"count": 6
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.7627153396606445,
"min": 0.34516000747680664,
"max": 4.7627153396606445,
"count": 6
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 976.3566284179688,
"min": 66.96104431152344,
"max": 976.3566284179688,
"count": 6
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 6
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 6
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06690161215568272,
"min": 0.062435699654765905,
"max": 0.07468453972643485,
"count": 6
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.33450806077841355,
"min": 0.24974279861906362,
"max": 0.34266459971010643,
"count": 6
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2576114994638106,
"min": 0.12173185954281293,
"max": 0.2876496722593027,
"count": 6
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.288057497319053,
"min": 0.4869274381712517,
"max": 1.3561888936103559,
"count": 6
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.544009152e-05,
"min": 2.544009152e-05,
"max": 0.00027294000901999997,
"count": 6
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0001272004576,
"min": 0.0001272004576,
"max": 0.0011172001276,
"count": 6
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10848000000000002,
"min": 0.10848000000000002,
"max": 0.19098,
"count": 6
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5424000000000001,
"min": 0.49992000000000014,
"max": 0.8724000000000001,
"count": 6
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0004331519999999999,
"min": 0.0004331519999999999,
"max": 0.004549902,
"count": 6
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0021657599999999996,
"min": 0.0021657599999999996,
"max": 0.01863276,
"count": 6
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 11.345454545454546,
"min": 3.522727272727273,
"max": 11.345454545454546,
"count": 6
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 624.0,
"min": 155.0,
"max": 624.0,
"count": 6
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 11.345454545454546,
"min": 3.522727272727273,
"max": 11.345454545454546,
"count": 6
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 624.0,
"min": 155.0,
"max": 624.0,
"count": 6
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 6
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 6
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680784928",
"python_version": "3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:04:59) [GCC 10.3.0]",
"command_line_arguments": "ml-agents/mlagents/trainers/learn.py ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680785074"
},
"total": 145.45940288200018,
"count": 1,
"self": 0.48453295000035723,
"children": {
"run_training.setup": {
"total": 0.021707205999973667,
"count": 1,
"self": 0.021707205999973667
},
"TrainerController.start_learning": {
"total": 144.95316272599985,
"count": 1,
"self": 0.16149714400512494,
"children": {
"TrainerController._reset_env": {
"total": 3.113833035000198,
"count": 1,
"self": 3.113833035000198
},
"TrainerController.advance": {
"total": 141.46736615899476,
"count": 5471,
"self": 0.07663059798960603,
"children": {
"env_step": {
"total": 141.39073556100516,
"count": 5471,
"self": 101.17150663101233,
"children": {
"SubprocessEnvManager._take_step": {
"total": 40.136498686992354,
"count": 5471,
"self": 0.5052758879951398,
"children": {
"TorchPolicy.evaluate": {
"total": 39.631222798997214,
"count": 5471,
"self": 39.631222798997214
}
}
},
"workers": {
"total": 0.08273024300046927,
"count": 5471,
"self": 0.0,
"children": {
"worker_root": {
"total": 144.3440517189972,
"count": 5471,
"is_parallel": true,
"self": 65.2818101589994,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001980684000045585,
"count": 1,
"is_parallel": true,
"self": 0.0006041300000561023,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013765539999894827,
"count": 10,
"is_parallel": true,
"self": 0.0013765539999894827
}
}
},
"UnityEnvironment.step": {
"total": 0.08273509900004683,
"count": 1,
"is_parallel": true,
"self": 0.0005849889998899016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003496950000680954,
"count": 1,
"is_parallel": true,
"self": 0.0003496950000680954
},
"communicator.exchange": {
"total": 0.07961141800001315,
"count": 1,
"is_parallel": true,
"self": 0.07961141800001315
},
"steps_from_proto": {
"total": 0.0021889970000756875,
"count": 1,
"is_parallel": true,
"self": 0.00044234500091988593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017466519991558016,
"count": 10,
"is_parallel": true,
"self": 0.0017466519991558016
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 79.0622415599978,
"count": 5470,
"is_parallel": true,
"self": 2.8316271569817673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.7115460930017434,
"count": 5470,
"is_parallel": true,
"self": 1.7115460930017434
},
"communicator.exchange": {
"total": 64.90729787000214,
"count": 5470,
"is_parallel": true,
"self": 64.90729787000214
},
"steps_from_proto": {
"total": 9.611770440012151,
"count": 5470,
"is_parallel": true,
"self": 1.9485815850021027,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.663188855010048,
"count": 54700,
"is_parallel": true,
"self": 7.663188855010048
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001451719999749912,
"count": 1,
"self": 0.0001451719999749912,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 140.48691986093695,
"count": 114276,
"is_parallel": true,
"self": 2.6478413979371,
"children": {
"process_trajectory": {
"total": 78.75249613300025,
"count": 114276,
"is_parallel": true,
"self": 78.52266481600032,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22983131699993464,
"count": 1,
"is_parallel": true,
"self": 0.22983131699993464
}
}
},
"_update_policy": {
"total": 59.0865823299996,
"count": 27,
"is_parallel": true,
"self": 16.2498989529995,
"children": {
"TorchPPOOptimizer.update": {
"total": 42.8366833770001,
"count": 1374,
"is_parallel": true,
"self": 42.8366833770001
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21032121599978382,
"count": 1,
"self": 0.0012572019998060568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20906401399997776,
"count": 1,
"self": 0.20906401399997776
}
}
}
}
}
}
}