{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5874015092849731,
"min": 0.5874015092849731,
"max": 2.558105945587158,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 29347.75390625,
"min": 29347.75390625,
"max": 129243.1875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.570233345031738,
"min": 2.4594948291778564,
"max": 13.570233345031738,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 13610.9443359375,
"min": 2439.81884765625,
"max": 13610.9443359375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 50347.0,
"min": 48158.0,
"max": 50347.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06764392475474991,
"min": 0.06575801722675689,
"max": 0.07061255587323169,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 1.555810269359248,
"min": 1.4466763789886516,
"max": 1.5997030806217534,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18110586030175313,
"min": 0.18110586030175313,
"max": 0.2492557913191991,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 4.165434786940322,
"min": 4.04050108527436,
"max": 5.732883200341579,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.955295022400004e-06,
"min": 9.955295022400004e-06,
"max": 0.00018991520504240002,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00022897178551520007,
"min": 0.00022897178551520007,
"max": 0.0041781345109328,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10497759999999999,
"min": 0.10497759999999999,
"max": 0.19495760000000004,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 2.4144848,
"min": 2.4144848,
"max": 4.289067200000001,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002583822400000001,
"min": 0.0002583822400000001,
"max": 0.0047483842399999995,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0059427915200000025,
"min": 0.0059427915200000025,
"max": 0.10446445328,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.509881422924902,
"min": 7.805785123966942,
"max": 26.509881422924902,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6707.0,
"min": 1889.0,
"max": 6707.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.509881422924902,
"min": 7.805785123966942,
"max": 26.509881422924902,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6707.0,
"min": 1889.0,
"max": 6707.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677130189",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677131299"
},
"total": 1110.0868170689998,
"count": 1,
"self": 0.4336108989998593,
"children": {
"run_training.setup": {
"total": 0.10710819400003402,
"count": 1,
"self": 0.10710819400003402
},
"TrainerController.start_learning": {
"total": 1109.546097976,
"count": 1,
"self": 1.285786027993936,
"children": {
"TrainerController._reset_env": {
"total": 9.289196360999995,
"count": 1,
"self": 9.289196360999995
},
"TrainerController.advance": {
"total": 1098.854096828006,
"count": 45478,
"self": 0.6433595239864189,
"children": {
"env_step": {
"total": 1098.2107373040196,
"count": 45478,
"self": 750.7752985510313,
"children": {
"SubprocessEnvManager._take_step": {
"total": 346.77179410597967,
"count": 45478,
"self": 3.6747485119655607,
"children": {
"TorchPolicy.evaluate": {
"total": 343.0970455940141,
"count": 45478,
"self": 77.20695305201997,
"children": {
"TorchPolicy.sample_actions": {
"total": 265.89009254199414,
"count": 45478,
"self": 265.89009254199414
}
}
}
}
},
"workers": {
"total": 0.6636446470087662,
"count": 45478,
"self": 0.0,
"children": {
"worker_root": {
"total": 1105.8759041549954,
"count": 45478,
"is_parallel": true,
"self": 532.9559567119921,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005668463999995765,
"count": 1,
"is_parallel": true,
"self": 0.004038370999921881,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016300930000738845,
"count": 10,
"is_parallel": true,
"self": 0.0016300930000738845
}
}
},
"UnityEnvironment.step": {
"total": 0.03394184899997299,
"count": 1,
"is_parallel": true,
"self": 0.0005756489999271253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042431200000692115,
"count": 1,
"is_parallel": true,
"self": 0.00042431200000692115
},
"communicator.exchange": {
"total": 0.03119385500002636,
"count": 1,
"is_parallel": true,
"self": 0.03119385500002636
},
"steps_from_proto": {
"total": 0.0017480330000125832,
"count": 1,
"is_parallel": true,
"self": 0.0004180980001251555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013299349998874277,
"count": 10,
"is_parallel": true,
"self": 0.0013299349998874277
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 572.9199474430033,
"count": 45477,
"is_parallel": true,
"self": 23.724157223033558,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.759196698975188,
"count": 45477,
"is_parallel": true,
"self": 12.759196698975188
},
"communicator.exchange": {
"total": 462.23686106001884,
"count": 45477,
"is_parallel": true,
"self": 462.23686106001884
},
"steps_from_proto": {
"total": 74.19973246097561,
"count": 45477,
"is_parallel": true,
"self": 16.219369927897958,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.98036253307765,
"count": 454770,
"is_parallel": true,
"self": 57.98036253307765
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019000700012838934,
"count": 1,
"self": 0.00019000700012838934,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1090.9227304309447,
"count": 984597,
"is_parallel": true,
"self": 24.49704232588965,
"children": {
"process_trajectory": {
"total": 620.5514526460553,
"count": 984597,
"is_parallel": true,
"self": 618.7436951670552,
"children": {
"RLTrainer._checkpoint": {
"total": 1.807757479000088,
"count": 10,
"is_parallel": true,
"self": 1.807757479000088
}
}
},
"_update_policy": {
"total": 445.87423545899975,
"count": 227,
"is_parallel": true,
"self": 153.68665591800323,
"children": {
"TorchPPOOptimizer.update": {
"total": 292.1875795409965,
"count": 11574,
"is_parallel": true,
"self": 292.1875795409965
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11682875199994669,
"count": 1,
"self": 0.0008816199999728269,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11594713199997386,
"count": 1,
"self": 0.11594713199997386
}
}
}
}
}
}
}