{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.055451512336731,
"min": 1.055451512336731,
"max": 2.867302656173706,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10089.0615234375,
"min": 10089.0615234375,
"max": 29364.046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.630255699157715,
"min": 0.37473225593566895,
"max": 12.630255699157715,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2462.89990234375,
"min": 72.69805908203125,
"max": 2534.5458984375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07089697661057488,
"min": 0.06155606434200023,
"max": 0.07594182110749934,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28358790644229953,
"min": 0.2462242573680009,
"max": 0.37970910553749665,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20045469679376657,
"min": 0.11664071251574319,
"max": 0.2900810149662635,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8018187871750663,
"min": 0.46656285006297277,
"max": 1.4299725510326087,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.5,
"min": 3.2954545454545454,
"max": 25.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1122.0,
"min": 145.0,
"max": 1352.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.5,
"min": 3.2954545454545454,
"max": 25.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1122.0,
"min": 145.0,
"max": 1352.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704603942",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704604411"
},
"total": 469.210015548,
"count": 1,
"self": 0.49232972999982394,
"children": {
"run_training.setup": {
"total": 0.050948766000033174,
"count": 1,
"self": 0.050948766000033174
},
"TrainerController.start_learning": {
"total": 468.66673705200014,
"count": 1,
"self": 0.6009158229993545,
"children": {
"TrainerController._reset_env": {
"total": 3.096576672000083,
"count": 1,
"self": 3.096576672000083
},
"TrainerController.advance": {
"total": 464.86970745100086,
"count": 18200,
"self": 0.28776632000074187,
"children": {
"env_step": {
"total": 464.5819411310001,
"count": 18200,
"self": 306.9815451479824,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.29733758002055,
"count": 18200,
"self": 1.5416031310237486,
"children": {
"TorchPolicy.evaluate": {
"total": 155.7557344489968,
"count": 18200,
"self": 155.7557344489968
}
}
},
"workers": {
"total": 0.30305840299718056,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 467.38201580499936,
"count": 18200,
"is_parallel": true,
"self": 229.65665746498996,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00543816100002914,
"count": 1,
"is_parallel": true,
"self": 0.0033157620000565657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021223989999725745,
"count": 10,
"is_parallel": true,
"self": 0.0021223989999725745
}
}
},
"UnityEnvironment.step": {
"total": 0.04150479700001597,
"count": 1,
"is_parallel": true,
"self": 0.0006752360000064073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005172840000113865,
"count": 1,
"is_parallel": true,
"self": 0.0005172840000113865
},
"communicator.exchange": {
"total": 0.03832216000000699,
"count": 1,
"is_parallel": true,
"self": 0.03832216000000699
},
"steps_from_proto": {
"total": 0.001990116999991187,
"count": 1,
"is_parallel": true,
"self": 0.00042315000007420167,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015669669999169855,
"count": 10,
"is_parallel": true,
"self": 0.0015669669999169855
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.7253583400094,
"count": 18199,
"is_parallel": true,
"self": 11.12182515399354,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.731163699007652,
"count": 18199,
"is_parallel": true,
"self": 5.731163699007652
},
"communicator.exchange": {
"total": 184.87950279200174,
"count": 18199,
"is_parallel": true,
"self": 184.87950279200174
},
"steps_from_proto": {
"total": 35.992866695006455,
"count": 18199,
"is_parallel": true,
"self": 6.709548436988939,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.283318258017516,
"count": 181990,
"is_parallel": true,
"self": 29.283318258017516
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016788499988251715,
"count": 1,
"self": 0.00016788499988251715,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 459.0446566000501,
"count": 690866,
"is_parallel": true,
"self": 14.932108807123313,
"children": {
"process_trajectory": {
"total": 253.88687899792728,
"count": 690866,
"is_parallel": true,
"self": 253.02472981492724,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8621491830000423,
"count": 4,
"is_parallel": true,
"self": 0.8621491830000423
}
}
},
"_update_policy": {
"total": 190.22566879499948,
"count": 90,
"is_parallel": true,
"self": 61.26042637000319,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.9652424249963,
"count": 4584,
"is_parallel": true,
"self": 128.9652424249963
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09936922099996082,
"count": 1,
"self": 0.0010925769997811585,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09827664400017966,
"count": 1,
"self": 0.09827664400017966
}
}
}
}
}
}
}