{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7236392498016357,
"min": 0.6671388149261475,
"max": 2.8741025924682617,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6901.34765625,
"min": 6621.30419921875,
"max": 29450.615234375,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.914094924926758,
"min": 0.20598557591438293,
"max": 13.997663497924805,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2713.24853515625,
"min": 39.96120071411133,
"max": 2864.0966796875,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.035934752648245194,
"min": 0.024169132694927008,
"max": 0.04102505868650041,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.14373901059298078,
"min": 0.09667653077970803,
"max": 0.19341183612899235,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19834377275158965,
"min": 0.10267827349404494,
"max": 0.3755148438115915,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7933750910063586,
"min": 0.4107130939761798,
"max": 1.6518405750393867,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.3764995412000023e-06,
"min": 1.3764995412000023e-06,
"max": 0.00029837640054119997,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.505998164800009e-06,
"min": 5.505998164800009e-06,
"max": 0.001477032007656,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10045880000000001,
"min": 0.10045880000000001,
"max": 0.19945880000000002,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40183520000000006,
"min": 0.40183520000000006,
"max": 0.992344,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.289412000000005e-05,
"min": 3.289412000000005e-05,
"max": 0.00497299412,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001315764800000002,
"min": 0.0001315764800000002,
"max": 0.0246179656,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.68888888888889,
"min": 2.6363636363636362,
"max": 27.654545454545456,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1201.0,
"min": 116.0,
"max": 1521.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.68888888888889,
"min": 2.6363636363636362,
"max": 27.654545454545456,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1201.0,
"min": 116.0,
"max": 1521.0,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747901075",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747903666"
},
"total": 2591.0808448959997,
"count": 1,
"self": 1.3373183019998578,
"children": {
"run_training.setup": {
"total": 0.04386732899990875,
"count": 1,
"self": 0.04386732899990875
},
"TrainerController.start_learning": {
"total": 2589.699659265,
"count": 1,
"self": 4.3409059360847095,
"children": {
"TrainerController._reset_env": {
"total": 3.844263060000003,
"count": 1,
"self": 3.844263060000003
},
"TrainerController.advance": {
"total": 2581.381505847915,
"count": 90942,
"self": 2.137206398916078,
"children": {
"env_step": {
"total": 2579.244299448999,
"count": 90942,
"self": 1885.8009423379685,
"children": {
"SubprocessEnvManager._take_step": {
"total": 691.4001852959808,
"count": 90942,
"self": 10.811671299006093,
"children": {
"TorchPolicy.evaluate": {
"total": 680.5885139969747,
"count": 90942,
"self": 680.5885139969747
}
}
},
"workers": {
"total": 2.0431718150495044,
"count": 90942,
"self": 0.0,
"children": {
"worker_root": {
"total": 2580.490567093998,
"count": 90942,
"is_parallel": true,
"self": 1069.2596755530203,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006843603999982406,
"count": 1,
"is_parallel": true,
"self": 0.004562033999945925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022815700000364814,
"count": 10,
"is_parallel": true,
"self": 0.0022815700000364814
}
}
},
"UnityEnvironment.step": {
"total": 0.04793558399990161,
"count": 1,
"is_parallel": true,
"self": 0.0007180509999216156,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045635100002527906,
"count": 1,
"is_parallel": true,
"self": 0.00045635100002527906
},
"communicator.exchange": {
"total": 0.044513626999901135,
"count": 1,
"is_parallel": true,
"self": 0.044513626999901135
},
"steps_from_proto": {
"total": 0.0022475550000535804,
"count": 1,
"is_parallel": true,
"self": 0.00042605400017237116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018215009998812093,
"count": 10,
"is_parallel": true,
"self": 0.0018215009998812093
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1511.2308915409776,
"count": 90941,
"is_parallel": true,
"self": 69.50451772897077,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.050689971040356,
"count": 90941,
"is_parallel": true,
"self": 36.050689971040356
},
"communicator.exchange": {
"total": 1191.6192000440083,
"count": 90941,
"is_parallel": true,
"self": 1191.6192000440083
},
"steps_from_proto": {
"total": 214.0564837969581,
"count": 90941,
"is_parallel": true,
"self": 42.65219936110793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 171.40428443585017,
"count": 909410,
"is_parallel": true,
"self": 171.40428443585017
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00025783000000956235,
"count": 1,
"self": 0.00025783000000956235,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 2547.227404425341,
"count": 3877453,
"is_parallel": true,
"self": 84.31743167719196,
"children": {
"process_trajectory": {
"total": 1485.7807651361468,
"count": 3877453,
"is_parallel": true,
"self": 1483.1218496941465,
"children": {
"RLTrainer._checkpoint": {
"total": 2.6589154420004206,
"count": 10,
"is_parallel": true,
"self": 2.6589154420004206
}
}
},
"_update_policy": {
"total": 977.129207612002,
"count": 454,
"is_parallel": true,
"self": 341.6394724670047,
"children": {
"TorchPPOOptimizer.update": {
"total": 635.4897351449973,
"count": 5448,
"is_parallel": true,
"self": 635.4897351449973
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1327265909999369,
"count": 1,
"self": 0.0016084320000118169,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13111815899992507,
"count": 1,
"self": 0.13111815899992507
}
}
}
}
}
}
}