{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9926850199699402,
"min": 0.9926850199699402,
"max": 2.867450475692749,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9489.076171875,
"min": 9489.076171875,
"max": 29428.64453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.939508438110352,
"min": 0.27785298228263855,
"max": 12.939508438110352,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2523.2041015625,
"min": 53.90347671508789,
"max": 2605.0546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06625451121990587,
"min": 0.06107088410245328,
"max": 0.07569648650207775,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2650180448796235,
"min": 0.2491623279861147,
"max": 0.37848243251038877,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19765615594737668,
"min": 0.12255657784636223,
"max": 0.27518388897764917,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7906246237895067,
"min": 0.4902263113854489,
"max": 1.375919444888246,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.363636363636363,
"min": 3.409090909090909,
"max": 25.509090909090908,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1116.0,
"min": 150.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.363636363636363,
"min": 3.409090909090909,
"max": 25.509090909090908,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1116.0,
"min": 150.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695247215",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695247696"
},
"total": 480.623977955,
"count": 1,
"self": 0.44287001200007126,
"children": {
"run_training.setup": {
"total": 0.04485153799998898,
"count": 1,
"self": 0.04485153799998898
},
"TrainerController.start_learning": {
"total": 480.1362564049999,
"count": 1,
"self": 0.5652673620038513,
"children": {
"TrainerController._reset_env": {
"total": 4.20554822500003,
"count": 1,
"self": 4.20554822500003
},
"TrainerController.advance": {
"total": 475.2218530569961,
"count": 18201,
"self": 0.2726524750014505,
"children": {
"env_step": {
"total": 474.94920058199466,
"count": 18201,
"self": 343.5097114079904,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.15660577000733,
"count": 18201,
"self": 1.7648672399991483,
"children": {
"TorchPolicy.evaluate": {
"total": 129.39173853000818,
"count": 18201,
"self": 129.39173853000818
}
}
},
"workers": {
"total": 0.2828834039969479,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 478.53319421499634,
"count": 18201,
"is_parallel": true,
"self": 224.93434464099357,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005382724000014605,
"count": 1,
"is_parallel": true,
"self": 0.0038305800001126045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015521439999020004,
"count": 10,
"is_parallel": true,
"self": 0.0015521439999020004
}
}
},
"UnityEnvironment.step": {
"total": 0.04662003199996434,
"count": 1,
"is_parallel": true,
"self": 0.00042198699998152733,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032493100002284336,
"count": 1,
"is_parallel": true,
"self": 0.00032493100002284336
},
"communicator.exchange": {
"total": 0.04225446499992813,
"count": 1,
"is_parallel": true,
"self": 0.04225446499992813
},
"steps_from_proto": {
"total": 0.003618649000031837,
"count": 1,
"is_parallel": true,
"self": 0.00039137199985361804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003227277000178219,
"count": 10,
"is_parallel": true,
"self": 0.003227277000178219
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 253.59884957400277,
"count": 18200,
"is_parallel": true,
"self": 10.900925594013074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.518097346998047,
"count": 18200,
"is_parallel": true,
"self": 5.518097346998047
},
"communicator.exchange": {
"total": 199.75616402200365,
"count": 18200,
"is_parallel": true,
"self": 199.75616402200365
},
"steps_from_proto": {
"total": 37.423662610988,
"count": 18200,
"is_parallel": true,
"self": 6.776914601997191,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.646748008990812,
"count": 182000,
"is_parallel": true,
"self": 30.646748008990812
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015229699988594803,
"count": 1,
"self": 0.00015229699988594803,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 471.40222332396024,
"count": 456252,
"is_parallel": true,
"self": 10.550309554916225,
"children": {
"process_trajectory": {
"total": 259.29307071104415,
"count": 456252,
"is_parallel": true,
"self": 257.62515360904445,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6679171019997057,
"count": 4,
"is_parallel": true,
"self": 1.6679171019997057
}
}
},
"_update_policy": {
"total": 201.55884305799987,
"count": 90,
"is_parallel": true,
"self": 76.88330962800228,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.67553342999759,
"count": 4587,
"is_parallel": true,
"self": 124.67553342999759
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14343546400004925,
"count": 1,
"self": 0.0008276690000457165,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14260779500000353,
"count": 1,
"self": 0.14260779500000353
}
}
}
}
}
}
}