{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9806506037712097,
"min": 0.9806506037712097,
"max": 2.8608791828155518,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9320.103515625,
"min": 9320.103515625,
"max": 29203.853515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.16352367401123,
"min": 0.2675674259662628,
"max": 13.16352367401123,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2566.88720703125,
"min": 51.9080810546875,
"max": 2645.703857421875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06802802224624696,
"min": 0.0628585096616435,
"max": 0.07273902487370423,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27211208898498784,
"min": 0.251434038646574,
"max": 0.36369512436852114,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18540163301661905,
"min": 0.11987901202804756,
"max": 0.2959917340354592,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7416065320664762,
"min": 0.47951604811219023,
"max": 1.3714995489400976,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.90909090909091,
"min": 3.272727272727273,
"max": 25.927272727272726,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1140.0,
"min": 144.0,
"max": 1426.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.90909090909091,
"min": 3.272727272727273,
"max": 25.927272727272726,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1140.0,
"min": 144.0,
"max": 1426.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756574983",
"python_version": "3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]",
"command_line_arguments": "/root/DeepRL_HuggingFace/Unit3/.venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756575463"
},
"total": 480.7718617669998,
"count": 1,
"self": 0.21915289999992638,
"children": {
"run_training.setup": {
"total": 0.024531700000125056,
"count": 1,
"self": 0.024531700000125056
},
"TrainerController.start_learning": {
"total": 480.52817716699974,
"count": 1,
"self": 0.3188417010082958,
"children": {
"TrainerController._reset_env": {
"total": 2.5686239069998464,
"count": 1,
"self": 2.5686239069998464
},
"TrainerController.advance": {
"total": 477.541170858992,
"count": 18192,
"self": 0.3110575979787882,
"children": {
"env_step": {
"total": 340.06852876199855,
"count": 18192,
"self": 234.26361332295846,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.60814883905164,
"count": 18192,
"self": 1.0948364020596273,
"children": {
"TorchPolicy.evaluate": {
"total": 104.51331243699201,
"count": 18192,
"self": 104.51331243699201
}
}
},
"workers": {
"total": 0.19676659998845025,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 479.4417778620084,
"count": 18192,
"is_parallel": true,
"self": 265.4219448909946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025632000001678534,
"count": 1,
"is_parallel": true,
"self": 0.0014231999998628453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011400000003050081,
"count": 10,
"is_parallel": true,
"self": 0.0011400000003050081
}
}
},
"UnityEnvironment.step": {
"total": 0.02150819999997111,
"count": 1,
"is_parallel": true,
"self": 0.00020810000000892614,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016980000009425567,
"count": 1,
"is_parallel": true,
"self": 0.00016980000009425567
},
"communicator.exchange": {
"total": 0.0204522999999881,
"count": 1,
"is_parallel": true,
"self": 0.0204522999999881
},
"steps_from_proto": {
"total": 0.0006779999998798303,
"count": 1,
"is_parallel": true,
"self": 0.000160700000151337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005172999997284933,
"count": 10,
"is_parallel": true,
"self": 0.0005172999997284933
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 214.01983297101378,
"count": 18191,
"is_parallel": true,
"self": 4.045821000982642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.626018493007905,
"count": 18191,
"is_parallel": true,
"self": 2.626018493007905
},
"communicator.exchange": {
"total": 194.65295224205033,
"count": 18191,
"is_parallel": true,
"self": 194.65295224205033
},
"steps_from_proto": {
"total": 12.695041234972905,
"count": 18191,
"is_parallel": true,
"self": 2.866156799055716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.828884435917189,
"count": 181910,
"is_parallel": true,
"self": 9.828884435917189
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 137.16158449901468,
"count": 18192,
"self": 0.42061359702938717,
"children": {
"process_trajectory": {
"total": 27.58519571698389,
"count": 18192,
"self": 27.182226029984122,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4029696869997679,
"count": 4,
"self": 0.4029696869997679
}
}
},
"_update_policy": {
"total": 109.1557751850014,
"count": 90,
"self": 22.795602454996242,
"children": {
"TorchPPOOptimizer.update": {
"total": 86.36017273000516,
"count": 4587,
"self": 86.36017273000516
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.0995399999997062,
"count": 1,
"self": 0.0009329999998044514,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09860699999990175,
"count": 1,
"self": 0.09860699999990175
}
}
}
}
}
}
}