{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8389100432395935,
"min": 0.8389100432395935,
"max": 2.8364956378936768,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7973.0009765625,
"min": 7973.0009765625,
"max": 28954.947265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.887752532958984,
"min": 0.38027796149253845,
"max": 13.887752532958984,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2708.11181640625,
"min": 73.77392578125,
"max": 2803.7685546875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.02890364301977873,
"min": 0.022775418423407245,
"max": 0.039103600812571436,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.11561457207911492,
"min": 0.10328110918635502,
"max": 0.18568537911293484,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1635266486555338,
"min": 0.1306904087153574,
"max": 0.29158857837319374,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6541065946221352,
"min": 0.5227616348614296,
"max": 1.4579428918659687,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.0013851600382799997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102694,
"min": 0.102694,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.410776,
"min": 0.410776,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014443060000000012,
"min": 0.00014443060000000012,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000005,
"min": 0.0005777224000000005,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.454545454545453,
"min": 3.522727272727273,
"max": 27.454545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1208.0,
"min": 155.0,
"max": 1494.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.454545454545453,
"min": 3.522727272727273,
"max": 27.454545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1208.0,
"min": 155.0,
"max": 1494.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741123067",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget4 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741123535"
},
"total": 467.8614571379994,
"count": 1,
"self": 0.4317735669992544,
"children": {
"run_training.setup": {
"total": 0.021210648999840487,
"count": 1,
"self": 0.021210648999840487
},
"TrainerController.start_learning": {
"total": 467.4084729220003,
"count": 1,
"self": 0.3795009750574536,
"children": {
"TrainerController._reset_env": {
"total": 2.0207432250003876,
"count": 1,
"self": 2.0207432250003876
},
"TrainerController.advance": {
"total": 464.32578011794294,
"count": 18192,
"self": 0.38445776996240966,
"children": {
"env_step": {
"total": 340.72784528003876,
"count": 18192,
"self": 248.49223499304298,
"children": {
"SubprocessEnvManager._take_step": {
"total": 92.01695063303032,
"count": 18192,
"self": 1.350645945971337,
"children": {
"TorchPolicy.evaluate": {
"total": 90.66630468705898,
"count": 18192,
"self": 90.66630468705898
}
}
},
"workers": {
"total": 0.21865965396546017,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 464.6647627449738,
"count": 18192,
"is_parallel": true,
"self": 246.9382115389044,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029227750001155073,
"count": 1,
"is_parallel": true,
"self": 0.0009071730014511559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020156019986643514,
"count": 10,
"is_parallel": true,
"self": 0.0020156019986643514
}
}
},
"UnityEnvironment.step": {
"total": 0.03550738800004183,
"count": 1,
"is_parallel": true,
"self": 0.0005437910003820434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041364899971085833,
"count": 1,
"is_parallel": true,
"self": 0.00041364899971085833
},
"communicator.exchange": {
"total": 0.03272156100001666,
"count": 1,
"is_parallel": true,
"self": 0.03272156100001666
},
"steps_from_proto": {
"total": 0.0018283869999322633,
"count": 1,
"is_parallel": true,
"self": 0.00037317100122891134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001455215998703352,
"count": 10,
"is_parallel": true,
"self": 0.001455215998703352
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 217.72655120606942,
"count": 18191,
"is_parallel": true,
"self": 9.98475224804588,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.5771694500322155,
"count": 18191,
"is_parallel": true,
"self": 5.5771694500322155
},
"communicator.exchange": {
"total": 169.11668517505223,
"count": 18191,
"is_parallel": true,
"self": 169.11668517505223
},
"steps_from_proto": {
"total": 33.047944332939096,
"count": 18191,
"is_parallel": true,
"self": 5.966573635113036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.08137069782606,
"count": 181910,
"is_parallel": true,
"self": 27.08137069782606
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 123.21347706794177,
"count": 18192,
"self": 0.4546086459272374,
"children": {
"process_trajectory": {
"total": 54.86269106301643,
"count": 18192,
"self": 52.285580200016284,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5771108630001436,
"count": 4,
"self": 2.5771108630001436
}
}
},
"_update_policy": {
"total": 67.8961773589981,
"count": 90,
"self": 39.90245873201184,
"children": {
"TorchPPOOptimizer.update": {
"total": 27.993718626986265,
"count": 1080,
"self": 27.993718626986265
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.030000001075678e-07,
"count": 1,
"self": 9.030000001075678e-07
},
"TrainerController._save_models": {
"total": 0.6824477009995462,
"count": 1,
"self": 0.021447028999318718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6610006720002275,
"count": 1,
"self": 0.6610006720002275
}
}
}
}
}
}
}