{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.482830762863159,
"min": 2.482830762863159,
"max": 2.8493518829345703,
"count": 4
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 25399.359375,
"min": 25357.2890625,
"max": 29180.212890625,
"count": 4
},
"SnowballTarget.Step.mean": {
"value": 39984.0,
"min": 9952.0,
"max": 39984.0,
"count": 4
},
"SnowballTarget.Step.sum": {
"value": 39984.0,
"min": 9952.0,
"max": 39984.0,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 3.9534711837768555,
"min": 0.48707571625709534,
"max": 3.9534711837768555,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 810.4616088867188,
"min": 94.49269104003906,
"max": 810.4616088867188,
"count": 4
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 4
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 4
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06765790498112419,
"min": 0.065051287744609,
"max": 0.07303480284708533,
"count": 4
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3382895249056209,
"min": 0.260205150978436,
"max": 0.36517401423542667,
"count": 4
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.27724099130022756,
"min": 0.13018663041978418,
"max": 0.27724099130022756,
"count": 4
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.3862049565011378,
"min": 0.5207465216791367,
"max": 1.3862049565011378,
"count": 4
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.000247332017556,
"min": 0.000247332017556,
"max": 0.000291882002706,
"count": 4
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00123666008778,
"min": 0.001048728050424,
"max": 0.00138516003828,
"count": 4
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.182444,
"min": 0.182444,
"max": 0.19729400000000002,
"count": 4
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.91222,
"min": 0.7495760000000001,
"max": 0.96172,
"count": 4
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0041239556000000005,
"min": 0.0041239556000000005,
"max": 0.0048649706,
"count": 4
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.020619778000000002,
"min": 0.017483842399999998,
"max": 0.023089828,
"count": 4
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 11.545454545454545,
"min": 3.7045454545454546,
"max": 11.545454545454545,
"count": 4
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 635.0,
"min": 163.0,
"max": 635.0,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 11.545454545454545,
"min": 3.7045454545454546,
"max": 11.545454545454545,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 635.0,
"min": 163.0,
"max": 635.0,
"count": 4
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699064537",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699064659"
},
"total": 122.56977187300004,
"count": 1,
"self": 0.2883553300000017,
"children": {
"run_training.setup": {
"total": 0.04358193600000959,
"count": 1,
"self": 0.04358193600000959
},
"TrainerController.start_learning": {
"total": 122.23783460700002,
"count": 1,
"self": 0.131358784997758,
"children": {
"TrainerController._reset_env": {
"total": 8.567109713999969,
"count": 1,
"self": 8.567109713999969
},
"TrainerController.advance": {
"total": 113.40475710200229,
"count": 4345,
"self": 0.06543926700220482,
"children": {
"env_step": {
"total": 113.33931783500009,
"count": 4345,
"self": 77.4788793400026,
"children": {
"SubprocessEnvManager._take_step": {
"total": 35.79598615899937,
"count": 4345,
"self": 0.3448313740003073,
"children": {
"TorchPolicy.evaluate": {
"total": 35.45115478499906,
"count": 4345,
"self": 35.45115478499906
}
}
},
"workers": {
"total": 0.06445233599811218,
"count": 4344,
"self": 0.0,
"children": {
"worker_root": {
"total": 121.86400803100236,
"count": 4344,
"is_parallel": true,
"self": 64.9378748530035,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005095558999983041,
"count": 1,
"is_parallel": true,
"self": 0.0037185559999102225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013770030000728184,
"count": 10,
"is_parallel": true,
"self": 0.0013770030000728184
}
}
},
"UnityEnvironment.step": {
"total": 0.03484941599998592,
"count": 1,
"is_parallel": true,
"self": 0.0006114149999802976,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037075300002697986,
"count": 1,
"is_parallel": true,
"self": 0.00037075300002697986
},
"communicator.exchange": {
"total": 0.031985023000004276,
"count": 1,
"is_parallel": true,
"self": 0.031985023000004276
},
"steps_from_proto": {
"total": 0.001882224999974369,
"count": 1,
"is_parallel": true,
"self": 0.0003650260000540584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015171989999203106,
"count": 10,
"is_parallel": true,
"self": 0.0015171989999203106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 56.92613317799885,
"count": 4343,
"is_parallel": true,
"self": 2.518965963998596,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.240735528998698,
"count": 4343,
"is_parallel": true,
"self": 1.240735528998698
},
"communicator.exchange": {
"total": 45.279818045999605,
"count": 4343,
"is_parallel": true,
"self": 45.279818045999605
},
"steps_from_proto": {
"total": 7.886613639001951,
"count": 4343,
"is_parallel": true,
"self": 1.4658210650021601,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.42079257399979,
"count": 43430,
"is_parallel": true,
"self": 6.42079257399979
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.932499998882122e-05,
"count": 1,
"self": 3.932499998882122e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 112.40726025899727,
"count": 132541,
"is_parallel": true,
"self": 2.5171253829980174,
"children": {
"process_trajectory": {
"total": 65.088488156999,
"count": 132541,
"is_parallel": true,
"self": 63.02058278199894,
"children": {
"RLTrainer._checkpoint": {
"total": 2.067905375000066,
"count": 9,
"is_parallel": true,
"self": 2.067905375000066
}
}
},
"_update_policy": {
"total": 44.801646719000246,
"count": 21,
"is_parallel": true,
"self": 17.039947505001237,
"children": {
"TorchPPOOptimizer.update": {
"total": 27.76169921399901,
"count": 1068,
"is_parallel": true,
"self": 27.76169921399901
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13456968100001632,
"count": 1,
"self": 0.0011175230000048941,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13345215800001142,
"count": 1,
"self": 0.13345215800001142
}
}
}
}
}
}
}