{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7193920016288757,
"min": 0.644972026348114,
"max": 2.8559772968292236,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6979.541015625,
"min": 6230.97412109375,
"max": 29279.48046875,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.947929382324219,
"min": 0.4451306462287903,
"max": 14.07044792175293,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2859.325439453125,
"min": 86.3553466796875,
"max": 2870.371337890625,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06430221184643468,
"min": 0.06278411294539075,
"max": 0.07612753370374455,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.32151105923217344,
"min": 0.251136451781563,
"max": 0.3735430156133696,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15254325649755843,
"min": 0.14308350027045783,
"max": 0.2767700690447408,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7627162824877921,
"min": 0.5796235216891064,
"max": 1.373644238228307,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.0014540640153119996,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.1989176,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.30909090909091,
"min": 4.318181818181818,
"max": 27.477272727272727,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1502.0,
"min": 190.0,
"max": 1510.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.30909090909091,
"min": 4.318181818181818,
"max": 27.477272727272727,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1502.0,
"min": 190.0,
"max": 1510.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682765831",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682767043"
},
"total": 1211.921904452,
"count": 1,
"self": 0.7475630860001274,
"children": {
"run_training.setup": {
"total": 0.059591299999965486,
"count": 1,
"self": 0.059591299999965486
},
"TrainerController.start_learning": {
"total": 1211.114750066,
"count": 1,
"self": 1.2875781180132435,
"children": {
"TrainerController._reset_env": {
"total": 4.4236449590000575,
"count": 1,
"self": 4.4236449590000575
},
"TrainerController.advance": {
"total": 1205.0875946819867,
"count": 45501,
"self": 0.7013152209931377,
"children": {
"env_step": {
"total": 1204.3862794609936,
"count": 45501,
"self": 907.5355454139674,
"children": {
"SubprocessEnvManager._take_step": {
"total": 296.209017691006,
"count": 45501,
"self": 4.211431683007163,
"children": {
"TorchPolicy.evaluate": {
"total": 291.99758600799885,
"count": 45501,
"self": 291.99758600799885
}
}
},
"workers": {
"total": 0.641716356020197,
"count": 45501,
"self": 0.0,
"children": {
"worker_root": {
"total": 1207.4213552510369,
"count": 45501,
"is_parallel": true,
"self": 605.096152877025,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005515705999982856,
"count": 1,
"is_parallel": true,
"self": 0.004112745999805156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014029600001776998,
"count": 10,
"is_parallel": true,
"self": 0.0014029600001776998
}
}
},
"UnityEnvironment.step": {
"total": 0.07085110100001657,
"count": 1,
"is_parallel": true,
"self": 0.006187852999914867,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040852100005395187,
"count": 1,
"is_parallel": true,
"self": 0.00040852100005395187
},
"communicator.exchange": {
"total": 0.06241750799995316,
"count": 1,
"is_parallel": true,
"self": 0.06241750799995316
},
"steps_from_proto": {
"total": 0.001837219000094592,
"count": 1,
"is_parallel": true,
"self": 0.0003749050000578791,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001462314000036713,
"count": 10,
"is_parallel": true,
"self": 0.001462314000036713
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 602.3252023740118,
"count": 45500,
"is_parallel": true,
"self": 23.957158885989543,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.959156301995108,
"count": 45500,
"is_parallel": true,
"self": 12.959156301995108
},
"communicator.exchange": {
"total": 488.60255535303634,
"count": 45500,
"is_parallel": true,
"self": 488.60255535303634
},
"steps_from_proto": {
"total": 76.80633183299085,
"count": 45500,
"is_parallel": true,
"self": 14.946446573914727,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.85988525907612,
"count": 455000,
"is_parallel": true,
"self": 61.85988525907612
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001044529999489896,
"count": 1,
"self": 0.0001044529999489896,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1196.2852658851039,
"count": 1041870,
"is_parallel": true,
"self": 24.8510988141943,
"children": {
"process_trajectory": {
"total": 608.8936082789104,
"count": 1041870,
"is_parallel": true,
"self": 604.5513481869104,
"children": {
"RLTrainer._checkpoint": {
"total": 4.342260091999947,
"count": 10,
"is_parallel": true,
"self": 4.342260091999947
}
}
},
"_update_policy": {
"total": 562.540558791999,
"count": 227,
"is_parallel": true,
"self": 222.3576635050016,
"children": {
"TorchPPOOptimizer.update": {
"total": 340.18289528699745,
"count": 15432,
"is_parallel": true,
"self": 340.18289528699745
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.3158278539999628,
"count": 1,
"self": 0.0034862089999023738,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31234164500006045,
"count": 1,
"self": 0.31234164500006045
}
}
}
}
}
}
}