{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9809248447418213,
"min": 0.9809248447418213,
"max": 2.8695924282073975,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9419.8212890625,
"min": 9419.8212890625,
"max": 29419.060546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.434893608093262,
"min": 0.35331788659095764,
"max": 12.434893608093262,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2424.80419921875,
"min": 68.54367065429688,
"max": 2501.552490234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06148288606360153,
"min": 0.060758933093572574,
"max": 0.0751103808429987,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2459315442544061,
"min": 0.2430357323742903,
"max": 0.3755519042149935,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19038755158145054,
"min": 0.1292618820249724,
"max": 0.2802155288411122,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7615502063258022,
"min": 0.5170475280998896,
"max": 1.401077644205561,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.931818181818183,
"min": 3.772727272727273,
"max": 24.931818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1097.0,
"min": 166.0,
"max": 1343.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.931818181818183,
"min": 3.772727272727273,
"max": 24.931818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1097.0,
"min": 166.0,
"max": 1343.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687534208",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687534684"
},
"total": 476.41960470799995,
"count": 1,
"self": 0.7868718769999532,
"children": {
"run_training.setup": {
"total": 0.041033471999980975,
"count": 1,
"self": 0.041033471999980975
},
"TrainerController.start_learning": {
"total": 475.591699359,
"count": 1,
"self": 0.5684425610100448,
"children": {
"TrainerController._reset_env": {
"total": 4.026066561999983,
"count": 1,
"self": 4.026066561999983
},
"TrainerController.advance": {
"total": 470.7688216189901,
"count": 18231,
"self": 0.26231811798845683,
"children": {
"env_step": {
"total": 470.50650350100165,
"count": 18231,
"self": 344.1611316480077,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.07533617400156,
"count": 18231,
"self": 1.8856815300160292,
"children": {
"TorchPolicy.evaluate": {
"total": 124.18965464398553,
"count": 18231,
"self": 124.18965464398553
}
}
},
"workers": {
"total": 0.2700356789923717,
"count": 18231,
"self": 0.0,
"children": {
"worker_root": {
"total": 473.661803511991,
"count": 18231,
"is_parallel": true,
"self": 223.4906388899928,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00503459499998371,
"count": 1,
"is_parallel": true,
"self": 0.003538629999866316,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001495965000117394,
"count": 10,
"is_parallel": true,
"self": 0.001495965000117394
}
}
},
"UnityEnvironment.step": {
"total": 0.03569858999998132,
"count": 1,
"is_parallel": true,
"self": 0.0005727949999823068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031019799996556685,
"count": 1,
"is_parallel": true,
"self": 0.00031019799996556685
},
"communicator.exchange": {
"total": 0.032703114000014466,
"count": 1,
"is_parallel": true,
"self": 0.032703114000014466
},
"steps_from_proto": {
"total": 0.002112483000018983,
"count": 1,
"is_parallel": true,
"self": 0.00043444499999623076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016780380000227524,
"count": 10,
"is_parallel": true,
"self": 0.0016780380000227524
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.17116462199823,
"count": 18230,
"is_parallel": true,
"self": 10.481837223997331,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.477122158003965,
"count": 18230,
"is_parallel": true,
"self": 5.477122158003965
},
"communicator.exchange": {
"total": 198.08146589999814,
"count": 18230,
"is_parallel": true,
"self": 198.08146589999814
},
"steps_from_proto": {
"total": 36.1307393399988,
"count": 18230,
"is_parallel": true,
"self": 6.543469263003431,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.587270076995367,
"count": 182300,
"is_parallel": true,
"self": 29.587270076995367
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001414099999692553,
"count": 1,
"self": 0.0001414099999692553,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 467.2386430620365,
"count": 440719,
"is_parallel": true,
"self": 9.661212204052163,
"children": {
"process_trajectory": {
"total": 256.4827337289852,
"count": 440719,
"is_parallel": true,
"self": 255.13265043098522,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3500832979999586,
"count": 4,
"is_parallel": true,
"self": 1.3500832979999586
}
}
},
"_update_policy": {
"total": 201.09469712899914,
"count": 90,
"is_parallel": true,
"self": 78.78946812899898,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.30522900000017,
"count": 4587,
"is_parallel": true,
"self": 122.30522900000017
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22822720699991805,
"count": 1,
"self": 0.0010851659999389085,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22714204099997914,
"count": 1,
"self": 0.22714204099997914
}
}
}
}
}
}
}