{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.822943925857544,
"min": 0.822943925857544,
"max": 2.855656623840332,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7848.416015625,
"min": 7848.416015625,
"max": 29307.603515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.040994644165039,
"min": 0.35027793049812317,
"max": 12.040994644165039,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2347.993896484375,
"min": 67.95391845703125,
"max": 2442.960693359375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07108374858530714,
"min": 0.0605192747235289,
"max": 0.0760711023989516,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28433499434122855,
"min": 0.2581613375109109,
"max": 0.35689931071372516,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17985660944353132,
"min": 0.13524881294271088,
"max": 0.2769933935473947,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7194264377741253,
"min": 0.5409952517708435,
"max": 1.3849669677369736,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.068181818181817,
"min": 3.6818181818181817,
"max": 24.068181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1059.0,
"min": 162.0,
"max": 1295.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.068181818181817,
"min": 3.6818181818181817,
"max": 24.068181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1059.0,
"min": 162.0,
"max": 1295.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682397148",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682397618"
},
"total": 470.722500742,
"count": 1,
"self": 0.43744529400009924,
"children": {
"run_training.setup": {
"total": 0.11893624199996111,
"count": 1,
"self": 0.11893624199996111
},
"TrainerController.start_learning": {
"total": 470.16611920599996,
"count": 1,
"self": 0.5638819490108062,
"children": {
"TrainerController._reset_env": {
"total": 3.7358272189999866,
"count": 1,
"self": 3.7358272189999866
},
"TrainerController.advance": {
"total": 465.7329251669891,
"count": 18200,
"self": 0.2722297549983068,
"children": {
"env_step": {
"total": 465.4606954119908,
"count": 18200,
"self": 340.1765077069773,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.01027146500525,
"count": 18200,
"self": 1.7730930740170265,
"children": {
"TorchPolicy.evaluate": {
"total": 123.23717839098822,
"count": 18200,
"self": 123.23717839098822
}
}
},
"workers": {
"total": 0.27391624000824777,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 468.74376805901795,
"count": 18200,
"is_parallel": true,
"self": 218.16017643202542,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0056334050000259595,
"count": 1,
"is_parallel": true,
"self": 0.004153114000132518,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001480290999893441,
"count": 10,
"is_parallel": true,
"self": 0.001480290999893441
}
}
},
"UnityEnvironment.step": {
"total": 0.05254550699999072,
"count": 1,
"is_parallel": true,
"self": 0.0005587620000255811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003530209999667022,
"count": 1,
"is_parallel": true,
"self": 0.0003530209999667022
},
"communicator.exchange": {
"total": 0.04983627999996543,
"count": 1,
"is_parallel": true,
"self": 0.04983627999996543
},
"steps_from_proto": {
"total": 0.0017974440000330105,
"count": 1,
"is_parallel": true,
"self": 0.0003680850001046565,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001429358999928354,
"count": 10,
"is_parallel": true,
"self": 0.001429358999928354
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.58359162699253,
"count": 18199,
"is_parallel": true,
"self": 9.949467521987742,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.654649755010155,
"count": 18199,
"is_parallel": true,
"self": 5.654649755010155
},
"communicator.exchange": {
"total": 201.9492037630041,
"count": 18199,
"is_parallel": true,
"self": 201.9492037630041
},
"steps_from_proto": {
"total": 33.030270586990525,
"count": 18199,
"is_parallel": true,
"self": 6.5946217399941816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.435648846996344,
"count": 181990,
"is_parallel": true,
"self": 26.435648846996344
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.5806000028060225e-05,
"count": 1,
"self": 3.5806000028060225e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 462.2251046999789,
"count": 411257,
"is_parallel": true,
"self": 10.081140736990506,
"children": {
"process_trajectory": {
"total": 253.45757264398765,
"count": 411257,
"is_parallel": true,
"self": 252.38908545098764,
"children": {
"RLTrainer._checkpoint": {
"total": 1.06848719300001,
"count": 4,
"is_parallel": true,
"self": 1.06848719300001
}
}
},
"_update_policy": {
"total": 198.68639131900073,
"count": 90,
"is_parallel": true,
"self": 78.25193738999462,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.43445392900611,
"count": 4587,
"is_parallel": true,
"self": 120.43445392900611
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13344906500003617,
"count": 1,
"self": 0.0009316880000369565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13251737699999921,
"count": 1,
"self": 0.13251737699999921
}
}
}
}
}
}
}