{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9433902502059937,
"min": 0.9433902502059937,
"max": 2.859973669052124,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9007.490234375,
"min": 9007.490234375,
"max": 29288.990234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.8394193649292,
"min": 0.4369000792503357,
"max": 12.8394193649292,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2503.686767578125,
"min": 84.75861358642578,
"max": 2606.0068359375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06741483299422316,
"min": 0.06184676985957434,
"max": 0.07980881659336864,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2696593319768926,
"min": 0.2674881041042253,
"max": 0.3670451788961609,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.209397202613307,
"min": 0.11605154936984821,
"max": 0.2806769926466194,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.837588810453228,
"min": 0.46420619747939285,
"max": 1.3476857963730309,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.25,
"min": 3.409090909090909,
"max": 25.25,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1111.0,
"min": 150.0,
"max": 1386.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.25,
"min": 3.409090909090909,
"max": 25.25,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1111.0,
"min": 150.0,
"max": 1386.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674414153",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674414590"
},
"total": 436.775219438,
"count": 1,
"self": 0.3910607340000638,
"children": {
"run_training.setup": {
"total": 0.12333285500000102,
"count": 1,
"self": 0.12333285500000102
},
"TrainerController.start_learning": {
"total": 436.26082584899996,
"count": 1,
"self": 0.5250637720090481,
"children": {
"TrainerController._reset_env": {
"total": 9.31975363700002,
"count": 1,
"self": 9.31975363700002
},
"TrainerController.advance": {
"total": 426.29191515099086,
"count": 18203,
"self": 0.26581534499752024,
"children": {
"env_step": {
"total": 426.02609980599334,
"count": 18203,
"self": 275.7374435539842,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.022489465008,
"count": 18203,
"self": 1.420615935007163,
"children": {
"TorchPolicy.evaluate": {
"total": 148.60187353000083,
"count": 18203,
"self": 33.729499033994614,
"children": {
"TorchPolicy.sample_actions": {
"total": 114.87237449600622,
"count": 18203,
"self": 114.87237449600622
}
}
}
}
},
"workers": {
"total": 0.26616678700116836,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 435.0300448499971,
"count": 18203,
"is_parallel": true,
"self": 211.55189190799513,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005654199999980847,
"count": 1,
"is_parallel": true,
"self": 0.003369094000106543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002285105999874304,
"count": 10,
"is_parallel": true,
"self": 0.002285105999874304
}
}
},
"UnityEnvironment.step": {
"total": 0.038792546000024686,
"count": 1,
"is_parallel": true,
"self": 0.0005245030000082807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000344211000026462,
"count": 1,
"is_parallel": true,
"self": 0.000344211000026462
},
"communicator.exchange": {
"total": 0.03611391499998717,
"count": 1,
"is_parallel": true,
"self": 0.03611391499998717
},
"steps_from_proto": {
"total": 0.0018099170000027698,
"count": 1,
"is_parallel": true,
"self": 0.000438416999884339,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013715000001184308,
"count": 10,
"is_parallel": true,
"self": 0.0013715000001184308
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 223.478152942002,
"count": 18202,
"is_parallel": true,
"self": 8.46519830399302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.190888459006658,
"count": 18202,
"is_parallel": true,
"self": 5.190888459006658
},
"communicator.exchange": {
"total": 178.44701770000495,
"count": 18202,
"is_parallel": true,
"self": 178.44701770000495
},
"steps_from_proto": {
"total": 31.37504847899737,
"count": 18202,
"is_parallel": true,
"self": 6.670994370006554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.704054108990817,
"count": 182020,
"is_parallel": true,
"self": 24.704054108990817
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.9775000004738104e-05,
"count": 1,
"self": 3.9775000004738104e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 423.0582089980365,
"count": 342680,
"is_parallel": true,
"self": 9.724690289011562,
"children": {
"process_trajectory": {
"total": 243.00185648702558,
"count": 342680,
"is_parallel": true,
"self": 242.22279692502565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7790595619999294,
"count": 4,
"is_parallel": true,
"self": 0.7790595619999294
}
}
},
"_update_policy": {
"total": 170.33166222199935,
"count": 90,
"is_parallel": true,
"self": 42.110309222998694,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.22135299900066,
"count": 4584,
"is_parallel": true,
"self": 128.22135299900066
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12405351400002473,
"count": 1,
"self": 0.0008972929999799817,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12315622100004475,
"count": 1,
"self": 0.12315622100004475
}
}
}
}
}
}
}