Josrf's picture
Snowball Target trained with default hyperparameters [except num_epoch and beta]
abdefc0
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.07504403591156,
"min": 1.07504403591156,
"max": 2.867044687271118,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10323.6474609375,
"min": 10323.6474609375,
"max": 29424.48046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.075210571289062,
"min": 0.3742940425872803,
"max": 13.075210571289062,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2549.666015625,
"min": 72.61304473876953,
"max": 2644.62158203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07105281547068015,
"min": 0.06352557456655845,
"max": 0.07124361084949188,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2842112618827206,
"min": 0.2541022982662338,
"max": 0.3506868609985652,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20085762344739017,
"min": 0.14484312018817838,
"max": 0.29652883695328935,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8034304937895607,
"min": 0.5793724807527135,
"max": 1.1861153478131574,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000009e-06,
"min": 8.082097306000009e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.2328389224000035e-05,
"min": 3.2328389224000035e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269399999999998,
"min": 0.10269399999999998,
"max": 0.197294,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4107759999999999,
"min": 0.4107759999999999,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00021178060000000016,
"min": 0.00021178060000000016,
"max": 0.007297320599999998,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0008471224000000006,
"min": 0.0008471224000000006,
"max": 0.034632828000000004,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.477272727272727,
"min": 3.9545454545454546,
"max": 25.70909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1121.0,
"min": 174.0,
"max": 1414.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.477272727272727,
"min": 3.9545454545454546,
"max": 25.70909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1121.0,
"min": 174.0,
"max": 1414.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691525010",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691525759"
},
"total": 748.3383881939999,
"count": 1,
"self": 1.0035002989998247,
"children": {
"run_training.setup": {
"total": 0.06995849199995519,
"count": 1,
"self": 0.06995849199995519
},
"TrainerController.start_learning": {
"total": 747.2649294030001,
"count": 1,
"self": 0.8430205729921454,
"children": {
"TrainerController._reset_env": {
"total": 2.9237567019999915,
"count": 1,
"self": 2.9237567019999915
},
"TrainerController.advance": {
"total": 743.2149369890079,
"count": 18225,
"self": 0.413641801003223,
"children": {
"env_step": {
"total": 742.8012951880047,
"count": 18225,
"self": 617.4460272489978,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.91751310600205,
"count": 18225,
"self": 2.5461919280110124,
"children": {
"TorchPolicy.evaluate": {
"total": 122.37132117799104,
"count": 18225,
"self": 122.37132117799104
}
}
},
"workers": {
"total": 0.43775483300487394,
"count": 18225,
"self": 0.0,
"children": {
"worker_root": {
"total": 744.8853318930027,
"count": 18225,
"is_parallel": true,
"self": 390.3932109770004,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00834191199999168,
"count": 1,
"is_parallel": true,
"self": 0.006041395000011107,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002300516999980573,
"count": 10,
"is_parallel": true,
"self": 0.002300516999980573
}
}
},
"UnityEnvironment.step": {
"total": 0.05365702200003852,
"count": 1,
"is_parallel": true,
"self": 0.000785504000077708,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006071779999956561,
"count": 1,
"is_parallel": true,
"self": 0.0006071779999956561
},
"communicator.exchange": {
"total": 0.049187695999989955,
"count": 1,
"is_parallel": true,
"self": 0.049187695999989955
},
"steps_from_proto": {
"total": 0.0030766439999752038,
"count": 1,
"is_parallel": true,
"self": 0.0006064499997364692,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024701940002387346,
"count": 10,
"is_parallel": true,
"self": 0.0024701940002387346
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 354.49212091600225,
"count": 18224,
"is_parallel": true,
"self": 13.916237731998763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.710809847001087,
"count": 18224,
"is_parallel": true,
"self": 8.710809847001087
},
"communicator.exchange": {
"total": 277.0797108860147,
"count": 18224,
"is_parallel": true,
"self": 277.0797108860147
},
"steps_from_proto": {
"total": 54.78536245098769,
"count": 18224,
"is_parallel": true,
"self": 11.232379375006758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 43.55298307598093,
"count": 182240,
"is_parallel": true,
"self": 43.55298307598093
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019869800007654703,
"count": 1,
"self": 0.00019869800007654703,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 736.4556591490199,
"count": 612092,
"is_parallel": true,
"self": 17.43691613197916,
"children": {
"process_trajectory": {
"total": 336.34192071203995,
"count": 612092,
"is_parallel": true,
"self": 334.6744493120399,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6674714000000677,
"count": 4,
"is_parallel": true,
"self": 1.6674714000000677
}
}
},
"_update_policy": {
"total": 382.67682230500077,
"count": 90,
"is_parallel": true,
"self": 152.73014152699557,
"children": {
"TorchPPOOptimizer.update": {
"total": 229.9466807780052,
"count": 7645,
"is_parallel": true,
"self": 229.9466807780052
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2830164409999725,
"count": 1,
"self": 0.0015304179999020562,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2814860230000704,
"count": 1,
"self": 0.2814860230000704
}
}
}
}
}
}
}