{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.566879153251648,
"min": 0.5503954887390137,
"max": 2.79774808883667,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5387.61962890625,
"min": 5387.61962890625,
"max": 28559.412109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.642589569091797,
"min": 0.3224088251590729,
"max": 6.673239231109619,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1295.304931640625,
"min": 62.54730987548828,
"max": 1359.3515625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06361664517666499,
"min": 0.06265586681476241,
"max": 0.07279008682041119,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25446658070665995,
"min": 0.25062346725904966,
"max": 0.36395043410205596,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16198729471687007,
"min": 0.1035327503763541,
"max": 0.2537927966491849,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6479491788674803,
"min": 0.4141310015054164,
"max": 1.2689639832459245,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0776097306000005e-05,
"min": 1.0776097306000005e-05,
"max": 0.000389176002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.310438922400002e-05,
"min": 4.310438922400002e-05,
"max": 0.0018468800382800002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10538800000000001,
"min": 0.10538800000000001,
"max": 0.29458799999999996,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.42155200000000004,
"min": 0.42155200000000004,
"max": 1.42344,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.34090909090909,
"min": 3.522727272727273,
"max": 26.34090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1159.0,
"min": 155.0,
"max": 1434.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.34090909090909,
"min": 3.522727272727273,
"max": 26.34090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1159.0,
"min": 155.0,
"max": 1434.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752536393",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752536811"
},
"total": 418.49619862200007,
"count": 1,
"self": 0.44255313300004673,
"children": {
"run_training.setup": {
"total": 0.02312537600005271,
"count": 1,
"self": 0.02312537600005271
},
"TrainerController.start_learning": {
"total": 418.03052011299997,
"count": 1,
"self": 0.32008733397833566,
"children": {
"TrainerController._reset_env": {
"total": 3.337090395999894,
"count": 1,
"self": 3.337090395999894
},
"TrainerController.advance": {
"total": 414.2958325740217,
"count": 18192,
"self": 0.3518960020267059,
"children": {
"env_step": {
"total": 294.94915536599524,
"count": 18192,
"self": 227.5202705050085,
"children": {
"SubprocessEnvManager._take_step": {
"total": 67.21934630699388,
"count": 18192,
"self": 1.1923045600096884,
"children": {
"TorchPolicy.evaluate": {
"total": 66.0270417469842,
"count": 18192,
"self": 66.0270417469842
}
}
},
"workers": {
"total": 0.20953855399284294,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 416.79906422400813,
"count": 18192,
"is_parallel": true,
"self": 216.23835786999894,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00637842199989791,
"count": 1,
"is_parallel": true,
"self": 0.004932240999778514,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014461810001193953,
"count": 10,
"is_parallel": true,
"self": 0.0014461810001193953
}
}
},
"UnityEnvironment.step": {
"total": 0.035328498999888325,
"count": 1,
"is_parallel": true,
"self": 0.0005985210000289953,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038746799987166014,
"count": 1,
"is_parallel": true,
"self": 0.00038746799987166014
},
"communicator.exchange": {
"total": 0.03247618499995042,
"count": 1,
"is_parallel": true,
"self": 0.03247618499995042
},
"steps_from_proto": {
"total": 0.00186632500003725,
"count": 1,
"is_parallel": true,
"self": 0.0003663969998797256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014999280001575244,
"count": 10,
"is_parallel": true,
"self": 0.0014999280001575244
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 200.5607063540092,
"count": 18191,
"is_parallel": true,
"self": 9.715910417023451,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.2830345280017355,
"count": 18191,
"is_parallel": true,
"self": 5.2830345280017355
},
"communicator.exchange": {
"total": 154.8901882569919,
"count": 18191,
"is_parallel": true,
"self": 154.8901882569919
},
"steps_from_proto": {
"total": 30.671573151992106,
"count": 18191,
"is_parallel": true,
"self": 5.370330367952192,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.301242784039914,
"count": 181910,
"is_parallel": true,
"self": 25.301242784039914
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 118.99478120599974,
"count": 18192,
"self": 0.38630194602478696,
"children": {
"process_trajectory": {
"total": 25.827569891972644,
"count": 18192,
"self": 25.406613456972764,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42095643499988,
"count": 4,
"self": 0.42095643499988
}
}
},
"_update_policy": {
"total": 92.78090936800231,
"count": 90,
"self": 38.38074420099315,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.40016516700916,
"count": 4587,
"self": 54.40016516700916
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.520000847056508e-07,
"count": 1,
"self": 8.520000847056508e-07
},
"TrainerController._save_models": {
"total": 0.07750895699996363,
"count": 1,
"self": 0.0008132970001497597,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07669565999981387,
"count": 1,
"self": 0.07669565999981387
}
}
}
}
}
}
}