{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.8805291652679443,
"min": 2.8794522285461426,
"max": 2.8901326656341553,
"count": 5
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 2281.379150390625,
"min": 2027.1343994140625,
"max": 4308.11767578125,
"count": 5
},
"SnowballTarget.Step.mean": {
"value": 4976.0,
"min": 960.0,
"max": 4976.0,
"count": 5
},
"SnowballTarget.Step.sum": {
"value": 4976.0,
"min": 960.0,
"max": 4976.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4754427373409271,
"min": 0.13765957951545715,
"max": 0.4754427373409271,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 11.886068344116211,
"min": 2.0648937225341797,
"max": 11.886068344116211,
"count": 5
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.09165913168844246,
"min": 0.09057231067304707,
"max": 0.09165913168844246,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.09165913168844246,
"min": 0.09057231067304707,
"max": 0.09165913168844246,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1738411092436781,
"min": 0.07696533840939854,
"max": 0.1738411092436781,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.1738411092436781,
"min": 0.07696533840939854,
"max": 0.1738411092436781,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.128008623999997e-05,
"min": 4.128008623999997e-05,
"max": 0.00017328004224000004,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.128008623999997e-05,
"min": 4.128008623999997e-05,
"max": 0.00017328004224000004,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.11375999999999996,
"min": 0.11375999999999996,
"max": 0.15776000000000004,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.11375999999999996,
"min": 0.11375999999999996,
"max": 0.15776000000000004,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.742399999999996e-05,
"min": 7.742399999999996e-05,
"max": 0.0002930240000000001,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 7.742399999999996e-05,
"min": 7.742399999999996e-05,
"max": 0.0002930240000000001,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 2189.0,
"min": 2189.0,
"max": 2189.0,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 4.181818181818182,
"min": 1.5454545454545454,
"max": 4.181818181818182,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 46.0,
"min": 17.0,
"max": 46.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 4.181818181818182,
"min": 1.5454545454545454,
"max": 4.181818181818182,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 46.0,
"min": 17.0,
"max": 46.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724184501",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1724184520"
},
"total": 19.881858902000005,
"count": 1,
"self": 0.9217989770000088,
"children": {
"run_training.setup": {
"total": 0.07438043900000935,
"count": 1,
"self": 0.07438043900000935
},
"TrainerController.start_learning": {
"total": 18.885679485999987,
"count": 1,
"self": 0.015322810999691683,
"children": {
"TrainerController._reset_env": {
"total": 3.485792439999983,
"count": 1,
"self": 3.485792439999983
},
"TrainerController.advance": {
"total": 15.09980349200029,
"count": 464,
"self": 0.017548086000147123,
"children": {
"env_step": {
"total": 10.660487128999762,
"count": 464,
"self": 8.8826276409998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1.7687551509999082,
"count": 464,
"self": 0.05658154600007492,
"children": {
"TorchPolicy.evaluate": {
"total": 1.7121736049998333,
"count": 464,
"self": 1.7121736049998333
}
}
},
"workers": {
"total": 0.00910433700005342,
"count": 464,
"self": 0.0,
"children": {
"worker_root": {
"total": 18.452305042999797,
"count": 464,
"is_parallel": true,
"self": 10.891899674999792,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00763912600001504,
"count": 1,
"is_parallel": true,
"self": 0.0056157049999683295,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00202342100004671,
"count": 10,
"is_parallel": true,
"self": 0.00202342100004671
}
}
},
"UnityEnvironment.step": {
"total": 0.04497575800002096,
"count": 1,
"is_parallel": true,
"self": 0.0008315270000309738,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005604689999927359,
"count": 1,
"is_parallel": true,
"self": 0.0005604689999927359
},
"communicator.exchange": {
"total": 0.04096197699999493,
"count": 1,
"is_parallel": true,
"self": 0.04096197699999493
},
"steps_from_proto": {
"total": 0.002621785000002319,
"count": 1,
"is_parallel": true,
"self": 0.00047646400000189715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002145321000000422,
"count": 10,
"is_parallel": true,
"self": 0.002145321000000422
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 7.560405368000005,
"count": 463,
"is_parallel": true,
"self": 0.38288408499991533,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.19775328800014336,
"count": 463,
"is_parallel": true,
"self": 0.19775328800014336
},
"communicator.exchange": {
"total": 5.857596682000008,
"count": 463,
"is_parallel": true,
"self": 5.857596682000008
},
"steps_from_proto": {
"total": 1.1221713129999387,
"count": 463,
"is_parallel": true,
"self": 0.210148166999943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.9120231459999957,
"count": 4630,
"is_parallel": true,
"self": 0.9120231459999957
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4.421768277000382,
"count": 464,
"self": 0.02338984400077493,
"children": {
"process_trajectory": {
"total": 1.0512927559996115,
"count": 464,
"self": 1.0512927559996115
},
"_update_policy": {
"total": 3.3470856769999955,
"count": 2,
"self": 1.0336249809999742,
"children": {
"TorchPPOOptimizer.update": {
"total": 2.3134606960000212,
"count": 201,
"self": 2.3134606960000212
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.469000011411481e-06,
"count": 1,
"self": 1.469000011411481e-06
},
"TrainerController._save_models": {
"total": 0.28475927400000955,
"count": 1,
"self": 0.0011119020000194269,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2836473719999901,
"count": 1,
"self": 0.2836473719999901
}
}
}
}
}
}
}