{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3216782808303833,
"min": 1.3216782808303833,
"max": 2.8834047317504883,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12619.3837890625,
"min": 12619.3837890625,
"max": 29592.3828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.707954406738281,
"min": 0.226125106215477,
"max": 11.707954406738281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2283.051025390625,
"min": 43.86827087402344,
"max": 2347.169921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07054304578345824,
"min": 0.06394238597056436,
"max": 0.07360479509685212,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28217218313383297,
"min": 0.25736847893676895,
"max": 0.36190987325115953,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24640186766491215,
"min": 0.0914473842481932,
"max": 0.2683575829381452,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9856074706596486,
"min": 0.3657895369927728,
"max": 1.3312666701043354,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.694097306000002e-06,
"min": 2.694097306000002e-06,
"max": 9.7294002706e-05,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.0776389224000008e-05,
"min": 1.0776389224000008e-05,
"max": 0.0004617200382800001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.197294,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.055060000000006e-05,
"min": 9.055060000000006e-05,
"max": 0.0029190906000000003,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00036220240000000024,
"min": 0.00036220240000000024,
"max": 0.013855428000000001,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.40909090909091,
"min": 2.6136363636363638,
"max": 23.40909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1030.0,
"min": 115.0,
"max": 1261.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.40909090909091,
"min": 2.6136363636363638,
"max": 23.40909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1030.0,
"min": 115.0,
"max": 1261.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682590480",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682590982"
},
"total": 501.68080369400013,
"count": 1,
"self": 0.42889803800017035,
"children": {
"run_training.setup": {
"total": 0.11720615000012913,
"count": 1,
"self": 0.11720615000012913
},
"TrainerController.start_learning": {
"total": 501.13469950599983,
"count": 1,
"self": 0.57686195696715,
"children": {
"TrainerController._reset_env": {
"total": 3.863136655999824,
"count": 1,
"self": 3.863136655999824
},
"TrainerController.advance": {
"total": 496.544205550033,
"count": 18197,
"self": 0.2849036070360853,
"children": {
"env_step": {
"total": 496.2593019429969,
"count": 18197,
"self": 371.83671594501584,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.14734737697518,
"count": 18197,
"self": 1.7621767459784223,
"children": {
"TorchPolicy.evaluate": {
"total": 122.38517063099675,
"count": 18197,
"self": 122.38517063099675
}
}
},
"workers": {
"total": 0.27523862100588303,
"count": 18197,
"self": 0.0,
"children": {
"worker_root": {
"total": 499.53344662499694,
"count": 18197,
"is_parallel": true,
"self": 247.61405376500716,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004670456999974704,
"count": 1,
"is_parallel": true,
"self": 0.0033058900003197778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001364566999654926,
"count": 10,
"is_parallel": true,
"self": 0.001364566999654926
}
}
},
"UnityEnvironment.step": {
"total": 0.037968624999848544,
"count": 1,
"is_parallel": true,
"self": 0.000582373000042935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036711199982164544,
"count": 1,
"is_parallel": true,
"self": 0.00036711199982164544
},
"communicator.exchange": {
"total": 0.034895277000032365,
"count": 1,
"is_parallel": true,
"self": 0.034895277000032365
},
"steps_from_proto": {
"total": 0.002123862999951598,
"count": 1,
"is_parallel": true,
"self": 0.0004324699998505821,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016913930001010158,
"count": 10,
"is_parallel": true,
"self": 0.0016913930001010158
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 251.91939285998978,
"count": 18196,
"is_parallel": true,
"self": 10.380015326972625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.536308652009666,
"count": 18196,
"is_parallel": true,
"self": 5.536308652009666
},
"communicator.exchange": {
"total": 203.28506895500823,
"count": 18196,
"is_parallel": true,
"self": 203.28506895500823
},
"steps_from_proto": {
"total": 32.717999925999266,
"count": 18196,
"is_parallel": true,
"self": 6.522596904067996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.19540302193127,
"count": 181960,
"is_parallel": true,
"self": 26.19540302193127
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014398799999071343,
"count": 1,
"self": 0.00014398799999071343,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 492.94489132504737,
"count": 424460,
"is_parallel": true,
"self": 10.66820210306264,
"children": {
"process_trajectory": {
"total": 255.2104828349859,
"count": 424460,
"is_parallel": true,
"self": 254.78312833098607,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42735450399982255,
"count": 2,
"is_parallel": true,
"self": 0.42735450399982255
}
}
},
"_update_policy": {
"total": 227.06620638699883,
"count": 90,
"is_parallel": true,
"self": 88.15022115399688,
"children": {
"TorchPPOOptimizer.update": {
"total": 138.91598523300195,
"count": 6116,
"is_parallel": true,
"self": 138.91598523300195
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15035135499988428,
"count": 1,
"self": 0.0008800739999514917,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1494712809999328,
"count": 1,
"self": 0.1494712809999328
}
}
}
}
}
}
}