ManishW's picture
SnowballTarget First Push
2d0b7f6
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.8220418691635132,
"min": 1.8220418691635132,
"max": 2.868769884109497,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 18739.701171875,
"min": 18202.9609375,
"max": 29379.072265625,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.094221115112305,
"min": 0.3645612895488739,
"max": 9.094221115112305,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1855.22119140625,
"min": 70.72489166259766,
"max": 1855.22119140625,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.09952812653788772,
"min": 0.09240609132582048,
"max": 0.104680614932328,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.4976406326894386,
"min": 0.38663522339672507,
"max": 0.513275399057973,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.3432937054657469,
"min": 0.1304090313861878,
"max": 0.3668966120948978,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.7164685273287343,
"min": 0.5216361255447513,
"max": 1.834483060474489,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.7440094511999994e-05,
"min": 2.7440094511999994e-05,
"max": 0.0004729400054119999,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00013720047255999998,
"min": 0.00013720047255999998,
"max": 0.0021172000765599993,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.004729941200000001,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179655999999998,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.054545454545455,
"min": 2.9545454545454546,
"max": 19.054545454545455,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1048.0,
"min": 130.0,
"max": 1048.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.054545454545455,
"min": 2.9545454545454546,
"max": 19.054545454545455,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1048.0,
"min": 130.0,
"max": 1048.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681020190",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681020441"
},
"total": 250.86042513000007,
"count": 1,
"self": 0.8405507160000525,
"children": {
"run_training.setup": {
"total": 0.13395044399999279,
"count": 1,
"self": 0.13395044399999279
},
"TrainerController.start_learning": {
"total": 249.88592397000002,
"count": 1,
"self": 0.3271283140002197,
"children": {
"TrainerController._reset_env": {
"total": 3.938296484000034,
"count": 1,
"self": 3.938296484000034
},
"TrainerController.advance": {
"total": 245.40566417299976,
"count": 9149,
"self": 0.12925223600473146,
"children": {
"env_step": {
"total": 245.27641193699503,
"count": 9149,
"self": 189.36116552597878,
"children": {
"SubprocessEnvManager._take_step": {
"total": 55.781731156004184,
"count": 9149,
"self": 0.8368487160055338,
"children": {
"TorchPolicy.evaluate": {
"total": 54.94488243999865,
"count": 9149,
"self": 54.94488243999865
}
}
},
"workers": {
"total": 0.13351525501207107,
"count": 9149,
"self": 0.0,
"children": {
"worker_root": {
"total": 248.99388749799914,
"count": 9149,
"is_parallel": true,
"self": 126.70747975400207,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008510051999905954,
"count": 1,
"is_parallel": true,
"self": 0.007036900999878526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014731510000274284,
"count": 10,
"is_parallel": true,
"self": 0.0014731510000274284
}
}
},
"UnityEnvironment.step": {
"total": 0.03642179100006615,
"count": 1,
"is_parallel": true,
"self": 0.0005420590001676828,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003173110000034285,
"count": 1,
"is_parallel": true,
"self": 0.0003173110000034285
},
"communicator.exchange": {
"total": 0.03360143799989146,
"count": 1,
"is_parallel": true,
"self": 0.03360143799989146
},
"steps_from_proto": {
"total": 0.00196098300000358,
"count": 1,
"is_parallel": true,
"self": 0.00038466800003789103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001576314999965689,
"count": 10,
"is_parallel": true,
"self": 0.001576314999965689
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 122.28640774399707,
"count": 9148,
"is_parallel": true,
"self": 4.929207498980645,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.677457587004824,
"count": 9148,
"is_parallel": true,
"self": 2.677457587004824
},
"communicator.exchange": {
"total": 98.95680937100383,
"count": 9148,
"is_parallel": true,
"self": 98.95680937100383
},
"steps_from_proto": {
"total": 15.722933287007777,
"count": 9148,
"is_parallel": true,
"self": 3.1125190980311572,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.61041418897662,
"count": 91480,
"is_parallel": true,
"self": 12.61041418897662
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00036474099999850296,
"count": 1,
"self": 0.00036474099999850296,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 243.7236517519915,
"count": 203939,
"is_parallel": true,
"self": 4.916172322052375,
"children": {
"process_trajectory": {
"total": 123.71925832693978,
"count": 203939,
"is_parallel": true,
"self": 123.03018045793988,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6890778689999024,
"count": 2,
"is_parallel": true,
"self": 0.6890778689999024
}
}
},
"_update_policy": {
"total": 115.08822110299934,
"count": 45,
"is_parallel": true,
"self": 32.12658355099359,
"children": {
"TorchPPOOptimizer.update": {
"total": 82.96163755200575,
"count": 4587,
"is_parallel": true,
"self": 82.96163755200575
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21447025800000574,
"count": 1,
"self": 0.0012105099999644153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21325974800004133,
"count": 1,
"self": 0.21325974800004133
}
}
}
}
}
}
}