{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0899980068206787,
"min": 1.0899980068206787,
"max": 2.0142664909362793,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10515.2109375,
"min": 10515.2109375,
"max": 20672.41796875,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 499941.0,
"min": 209989.0,
"max": 499941.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 499941.0,
"min": 209989.0,
"max": 499941.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5099332332611084,
"min": 1.5659252405166626,
"max": 2.5099332332611084,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 512.0263671875,
"min": 302.22357177734375,
"max": 512.0263671875,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.163636363636364,
"min": 16.0,
"max": 25.163636363636364,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1384.0,
"min": 704.0,
"max": 1384.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.163636363636364,
"min": 16.0,
"max": 25.163636363636364,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1384.0,
"min": 704.0,
"max": 1384.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.05126684903225396,
"min": 0.04584989431561553,
"max": 0.05524927940336056,
"count": 28
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.05126684903225396,
"min": 0.04584989431561553,
"max": 0.05524927940336056,
"count": 28
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.13929917298257352,
"min": 0.1342815386876464,
"max": 0.16858418747782708,
"count": 28
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.13929917298257352,
"min": 0.1342815386876464,
"max": 0.16858418747782708,
"count": 28
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4598985401999997e-06,
"min": 1.4598985401999997e-06,
"max": 5.7797442202600006e-05,
"count": 28
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.4598985401999997e-06,
"min": 1.4598985401999997e-06,
"max": 5.7797442202600006e-05,
"count": 28
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10145980000000002,
"min": 0.10145980000000002,
"max": 0.1577974,
"count": 28
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10145980000000002,
"min": 0.10145980000000002,
"max": 0.1577974,
"count": 28
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.284402e-05,
"min": 8.284402e-05,
"max": 0.0028940902600000005,
"count": 28
},
"SnowballTarget.Policy.Beta.sum": {
"value": 8.284402e-05,
"min": 8.284402e-05,
"max": 0.0028940902600000005,
"count": 28
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678766969",
"python_version": "3.9.5 (default, Nov 23 2021, 15:27:38) \n[GCC 9.3.0]",
"command_line_arguments": "/home/sebastian/.virtualenvs/deep_rl/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1678767579"
},
"total": 610.5692535539993,
"count": 1,
"self": 0.21859918699919945,
"children": {
"run_training.setup": {
"total": 0.009277031000237912,
"count": 1,
"self": 0.009277031000237912
},
"TrainerController.start_learning": {
"total": 610.3413773359998,
"count": 1,
"self": 0.6872342400256457,
"children": {
"TrainerController._reset_env": {
"total": 2.035470527000143,
"count": 1,
"self": 2.035470527000143
},
"TrainerController.advance": {
"total": 607.531585887974,
"count": 27275,
"self": 0.3013475419538736,
"children": {
"env_step": {
"total": 607.2302383460201,
"count": 27275,
"self": 388.60798734409855,
"children": {
"SubprocessEnvManager._take_step": {
"total": 218.3174869959539,
"count": 27275,
"self": 1.5738390700316813,
"children": {
"TorchPolicy.evaluate": {
"total": 216.74364792592223,
"count": 27275,
"self": 216.74364792592223
}
}
},
"workers": {
"total": 0.30476400596762687,
"count": 27275,
"self": 0.0,
"children": {
"worker_root": {
"total": 609.2075701940394,
"count": 27275,
"is_parallel": true,
"self": 368.6318694520169,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013228629995865049,
"count": 1,
"is_parallel": true,
"self": 0.0004001460001745727,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009227169994119322,
"count": 10,
"is_parallel": true,
"self": 0.0009227169994119322
}
}
},
"UnityEnvironment.step": {
"total": 0.01990859299985459,
"count": 1,
"is_parallel": true,
"self": 0.0002577320001364569,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002522830000089016,
"count": 1,
"is_parallel": true,
"self": 0.0002522830000089016
},
"communicator.exchange": {
"total": 0.01867592100006732,
"count": 1,
"is_parallel": true,
"self": 0.01867592100006732
},
"steps_from_proto": {
"total": 0.000722656999641913,
"count": 1,
"is_parallel": true,
"self": 0.0001533209988338058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005693360008081072,
"count": 10,
"is_parallel": true,
"self": 0.0005693360008081072
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 240.5757007420225,
"count": 27274,
"is_parallel": true,
"self": 6.1114115691034385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.7374371960195276,
"count": 27274,
"is_parallel": true,
"self": 3.7374371960195276
},
"communicator.exchange": {
"total": 212.46255429088797,
"count": 27274,
"is_parallel": true,
"self": 212.46255429088797
},
"steps_from_proto": {
"total": 18.26429768601156,
"count": 27274,
"is_parallel": true,
"self": 4.015080995896824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.249216690114736,
"count": 272740,
"is_parallel": true,
"self": 14.249216690114736
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.861099998583086e-05,
"count": 1,
"self": 5.861099998583086e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 603.7645514752248,
"count": 810009,
"is_parallel": true,
"self": 11.483299925617757,
"children": {
"process_trajectory": {
"total": 415.5946609176117,
"count": 810009,
"is_parallel": true,
"self": 414.73243225061196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8622286669997266,
"count": 6,
"is_parallel": true,
"self": 0.8622286669997266
}
}
},
"_update_policy": {
"total": 176.6865906319954,
"count": 28,
"is_parallel": true,
"self": 61.52528520197757,
"children": {
"TorchPPOOptimizer.update": {
"total": 115.16130543001782,
"count": 5670,
"is_parallel": true,
"self": 115.16130543001782
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08702807000008761,
"count": 1,
"self": 0.0019151289998262655,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08511294100026134,
"count": 1,
"self": 0.08511294100026134
}
}
}
}
}
}
}