{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8756429553031921,
"min": 0.8593656420707703,
"max": 2.84114146232605,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8938.5634765625,
"min": 8243.03515625,
"max": 29002.373046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.158675193786621,
"min": 0.45054030418395996,
"max": 13.158675193786621,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2565.941650390625,
"min": 86.0531997680664,
"max": 2665.54638671875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07410422843817037,
"min": 0.057967368478043055,
"max": 0.07467315050227769,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2964169137526815,
"min": 0.24569042939601426,
"max": 0.359067719732183,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18217703528410079,
"min": 0.1538376708551511,
"max": 0.27213079818323543,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7287081411364031,
"min": 0.6153506834206044,
"max": 1.3606539909161772,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.794097402e-06,
"min": 7.794097402e-06,
"max": 0.000291594002802,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.1176389608e-05,
"min": 3.1176389608e-05,
"max": 0.0013837200387599997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10259800000000002,
"min": 0.10259800000000002,
"max": 0.19719800000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4103920000000001,
"min": 0.4103920000000001,
"max": 0.9612400000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00013964020000000003,
"min": 0.00013964020000000003,
"max": 0.0048601802,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005585608000000001,
"min": 0.0005585608000000001,
"max": 0.023065875999999996,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.181818181818183,
"min": 4.795454545454546,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1152.0,
"min": 211.0,
"max": 1411.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.181818181818183,
"min": 4.795454545454546,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1152.0,
"min": 211.0,
"max": 1411.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747492240",
"python_version": "3.10.16 (main, Dec 11 2024, 16:24:50) [GCC 11.2.0]",
"command_line_arguments": "/home/emanuele/miniconda3/envs/mlagents/bin/mlagents-learn /home/emanuele/Projects/ml-agents/config/ppo/SnowballTarget.yaml --env=./trained-env/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747492455"
},
"total": 215.1125194920005,
"count": 1,
"self": 0.21774217300117016,
"children": {
"run_training.setup": {
"total": 0.027775348000432132,
"count": 1,
"self": 0.027775348000432132
},
"TrainerController.start_learning": {
"total": 214.8670019709989,
"count": 1,
"self": 0.2292224710145092,
"children": {
"TrainerController._reset_env": {
"total": 1.3673971320004057,
"count": 1,
"self": 1.3673971320004057
},
"TrainerController.advance": {
"total": 213.2178821939833,
"count": 18192,
"self": 0.22068692109314725,
"children": {
"env_step": {
"total": 141.88855539999895,
"count": 18192,
"self": 98.16271313912694,
"children": {
"SubprocessEnvManager._take_step": {
"total": 43.587273086954156,
"count": 18192,
"self": 0.6589823949634592,
"children": {
"TorchPolicy.evaluate": {
"total": 42.928290691990696,
"count": 18192,
"self": 42.928290691990696
}
}
},
"workers": {
"total": 0.13856917391785828,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 214.46101288787213,
"count": 18192,
"is_parallel": true,
"self": 129.7783942488404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010103370004799217,
"count": 1,
"is_parallel": true,
"self": 0.00028784700225514825,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007224899982247734,
"count": 10,
"is_parallel": true,
"self": 0.0007224899982247734
}
}
},
"UnityEnvironment.step": {
"total": 0.014677832999950624,
"count": 1,
"is_parallel": true,
"self": 0.0002055830009339843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00015319500016630627,
"count": 1,
"is_parallel": true,
"self": 0.00015319500016630627
},
"communicator.exchange": {
"total": 0.013649169999553123,
"count": 1,
"is_parallel": true,
"self": 0.013649169999553123
},
"steps_from_proto": {
"total": 0.00066988499929721,
"count": 1,
"is_parallel": true,
"self": 0.000158017996000126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000511867003297084,
"count": 10,
"is_parallel": true,
"self": 0.000511867003297084
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 84.68261863903172,
"count": 18191,
"is_parallel": true,
"self": 3.3021643408628734,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.976723401043273,
"count": 18191,
"is_parallel": true,
"self": 1.976723401043273
},
"communicator.exchange": {
"total": 68.3873693870155,
"count": 18191,
"is_parallel": true,
"self": 68.3873693870155
},
"steps_from_proto": {
"total": 11.016361510110073,
"count": 18191,
"is_parallel": true,
"self": 2.392874241253594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8.62348726885648,
"count": 181910,
"is_parallel": true,
"self": 8.62348726885648
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 71.10863987289122,
"count": 18192,
"self": 0.27175934183651407,
"children": {
"process_trajectory": {
"total": 15.594262302052812,
"count": 18192,
"self": 15.304888923052204,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2893733790006081,
"count": 4,
"self": 0.2893733790006081
}
}
},
"_update_policy": {
"total": 55.24261822900189,
"count": 90,
"self": 21.284006324965958,
"children": {
"TorchPPOOptimizer.update": {
"total": 33.95861190403593,
"count": 4587,
"self": 33.95861190403593
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.609999789157882e-07,
"count": 1,
"self": 5.609999789157882e-07
},
"TrainerController._save_models": {
"total": 0.05249961300069117,
"count": 1,
"self": 0.0005349310013116337,
"children": {
"RLTrainer._checkpoint": {
"total": 0.051964681999379536,
"count": 1,
"self": 0.051964681999379536
}
}
}
}
}
}
}