{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.320796251296997,
"min": 1.320796251296997,
"max": 2.0667824745178223,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6741.34423828125,
"min": 5918.4384765625,
"max": 10687.48046875,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399968.0,
"min": 204960.0,
"max": 399968.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399968.0,
"min": 204960.0,
"max": 399968.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.445138931274414,
"min": 9.19918441772461,
"max": 12.445138931274414,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1207.178466796875,
"min": 886.4176025390625,
"max": 1328.1622314453125,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06158062665353474,
"min": 0.058522910532523315,
"max": 0.07950750454609293,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.12316125330706948,
"min": 0.11704582106504663,
"max": 0.23201147846389086,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21167126108033985,
"min": 0.19307311478198744,
"max": 0.31113601096120536,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4233425221606797,
"min": 0.3861462295639749,
"max": 0.8722384087010926,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.307099231000009e-06,
"min": 2.307099231000009e-06,
"max": 0.000147507050831,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.614198462000018e-06,
"min": 4.614198462000018e-06,
"max": 0.000410346163218,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10076900000000001,
"min": 0.10076900000000001,
"max": 0.14916900000000005,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20153800000000002,
"min": 0.20153800000000002,
"max": 0.436782,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0003944231000000015,
"min": 0.0003944231000000015,
"max": 0.024589583099999996,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.000788846200000003,
"min": 0.000788846200000003,
"max": 0.06840732179999999,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 4378.0,
"min": 4378.0,
"max": 6567.0,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.681818181818183,
"min": 17.363636363636363,
"max": 25.363636363636363,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 543.0,
"min": 382.0,
"max": 795.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.681818181818183,
"min": 17.363636363636363,
"max": 25.363636363636363,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 543.0,
"min": 382.0,
"max": 795.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673553299",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --resume --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --results-dir=/content/drive/MyDrive/logs",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673553690"
},
"total": 391.0180839159998,
"count": 1,
"self": 0.4442750389998764,
"children": {
"run_training.setup": {
"total": 0.11581376200001614,
"count": 1,
"self": 0.11581376200001614
},
"TrainerController.start_learning": {
"total": 390.4579951149999,
"count": 1,
"self": 0.3238755030261018,
"children": {
"TrainerController._reset_env": {
"total": 6.104598805000023,
"count": 1,
"self": 6.104598805000023
},
"TrainerController.advance": {
"total": 383.8695893709737,
"count": 18192,
"self": 0.34773727899482765,
"children": {
"env_step": {
"total": 276.6268127480007,
"count": 18192,
"self": 224.8743356819673,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.54614382700083,
"count": 18192,
"self": 1.2181476529776774,
"children": {
"TorchPolicy.evaluate": {
"total": 50.327996174023156,
"count": 18192,
"self": 11.925487167009123,
"children": {
"TorchPolicy.sample_actions": {
"total": 38.40250900701403,
"count": 18192,
"self": 38.40250900701403
}
}
}
}
},
"workers": {
"total": 0.20633323903257406,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 389.07266259598464,
"count": 18192,
"is_parallel": true,
"self": 189.33335503895114,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019612329997471534,
"count": 1,
"is_parallel": true,
"self": 0.0007097489997249795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012514840000221739,
"count": 10,
"is_parallel": true,
"self": 0.0012514840000221739
}
}
},
"UnityEnvironment.step": {
"total": 0.03415589399992314,
"count": 1,
"is_parallel": true,
"self": 0.0005557809995480056,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003800089998549083,
"count": 1,
"is_parallel": true,
"self": 0.0003800089998549083
},
"communicator.exchange": {
"total": 0.03100728600020375,
"count": 1,
"is_parallel": true,
"self": 0.03100728600020375
},
"steps_from_proto": {
"total": 0.0022128180003164744,
"count": 1,
"is_parallel": true,
"self": 0.0004232080000292626,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017896100002872117,
"count": 10,
"is_parallel": true,
"self": 0.0017896100002872117
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 199.7393075570335,
"count": 18191,
"is_parallel": true,
"self": 8.969154611038448,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.518197046977093,
"count": 18191,
"is_parallel": true,
"self": 5.518197046977093
},
"communicator.exchange": {
"total": 148.75100725997845,
"count": 18191,
"is_parallel": true,
"self": 148.75100725997845
},
"steps_from_proto": {
"total": 36.50094863903951,
"count": 18191,
"is_parallel": true,
"self": 7.449160291246244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.051788347793263,
"count": 181910,
"is_parallel": true,
"self": 29.051788347793263
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 106.89503934397817,
"count": 18192,
"self": 0.4096248180408111,
"children": {
"process_trajectory": {
"total": 26.46589423893829,
"count": 18192,
"self": 26.46589423893829
},
"_update_policy": {
"total": 80.01952028699907,
"count": 90,
"self": 43.141345680991435,
"children": {
"TorchPPOOptimizer.update": {
"total": 36.87817460600763,
"count": 4587,
"self": 36.87817460600763
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.629998203308787e-07,
"count": 1,
"self": 9.629998203308787e-07
},
"TrainerController._save_models": {
"total": 0.15993047300025864,
"count": 1,
"self": 0.0061853650004195515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1537451079998391,
"count": 1,
"self": 0.1537451079998391
}
}
}
}
}
}
}