{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.4185644388198853,
"min": 1.4185644388198853,
"max": 2.887559175491333,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13560.0576171875,
"min": 13560.0576171875,
"max": 29603.2578125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.220643997192383,
"min": 0.19058948755264282,
"max": 10.220643997192383,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1993.025634765625,
"min": 36.974361419677734,
"max": 2007.1842041015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.931818181818183,
"min": 2.6136363636363638,
"max": 21.931818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 965.0,
"min": 115.0,
"max": 1182.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.931818181818183,
"min": 2.6136363636363638,
"max": 21.931818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 965.0,
"min": 115.0,
"max": 1182.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.033617274566495325,
"min": 0.02740534193920515,
"max": 0.04224848162165371,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06723454913299065,
"min": 0.0548106838784103,
"max": 0.12674544486496114,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.31384649748603505,
"min": 0.10564297313491504,
"max": 0.31384649748603505,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6276929949720701,
"min": 0.21128594626983008,
"max": 0.9129869521905978,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.432097856000005e-06,
"min": 6.432097856000005e-06,
"max": 0.000290232003256,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.286419571200001e-05,
"min": 1.286419571200001e-05,
"max": 0.0007419960526679999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10214400000000001,
"min": 0.10214400000000001,
"max": 0.19674400000000003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20428800000000003,
"min": 0.20428800000000003,
"max": 0.547332,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011698560000000009,
"min": 0.00011698560000000009,
"max": 0.0048375256,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023397120000000018,
"min": 0.00023397120000000018,
"max": 0.012371866799999999,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696080253",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696080744"
},
"total": 490.77944430499997,
"count": 1,
"self": 0.7901492609998968,
"children": {
"run_training.setup": {
"total": 0.0434754810000868,
"count": 1,
"self": 0.0434754810000868
},
"TrainerController.start_learning": {
"total": 489.945819563,
"count": 1,
"self": 0.655497467013447,
"children": {
"TrainerController._reset_env": {
"total": 5.575525820000053,
"count": 1,
"self": 5.575525820000053
},
"TrainerController.advance": {
"total": 483.4776262639865,
"count": 18218,
"self": 0.33178842898996663,
"children": {
"env_step": {
"total": 483.14583783499654,
"count": 18218,
"self": 339.91112397198935,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.91074431499555,
"count": 18218,
"self": 1.8763184319890343,
"children": {
"TorchPolicy.evaluate": {
"total": 141.03442588300652,
"count": 18218,
"self": 141.03442588300652
}
}
},
"workers": {
"total": 0.32396954801163247,
"count": 18218,
"self": 0.0,
"children": {
"worker_root": {
"total": 488.1064464530068,
"count": 18218,
"is_parallel": true,
"self": 216.9528564330068,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006295354000030784,
"count": 1,
"is_parallel": true,
"self": 0.0040244200000643104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022709339999664735,
"count": 10,
"is_parallel": true,
"self": 0.0022709339999664735
}
}
},
"UnityEnvironment.step": {
"total": 0.04068133700002363,
"count": 1,
"is_parallel": true,
"self": 0.00075736000019333,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004859399999759262,
"count": 1,
"is_parallel": true,
"self": 0.0004859399999759262
},
"communicator.exchange": {
"total": 0.036036939999917195,
"count": 1,
"is_parallel": true,
"self": 0.036036939999917195
},
"steps_from_proto": {
"total": 0.003401096999937181,
"count": 1,
"is_parallel": true,
"self": 0.0014863029999787614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019147939999584196,
"count": 10,
"is_parallel": true,
"self": 0.0019147939999584196
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 271.15359002,
"count": 18217,
"is_parallel": true,
"self": 11.083371618984415,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.968688108015044,
"count": 18217,
"is_parallel": true,
"self": 5.968688108015044
},
"communicator.exchange": {
"total": 213.8578194570082,
"count": 18217,
"is_parallel": true,
"self": 213.8578194570082
},
"steps_from_proto": {
"total": 40.243710835992374,
"count": 18217,
"is_parallel": true,
"self": 7.519828291031331,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.72388254496104,
"count": 182170,
"is_parallel": true,
"self": 32.72388254496104
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001624809999611898,
"count": 1,
"self": 0.0001624809999611898,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 478.1618783570483,
"count": 602144,
"is_parallel": true,
"self": 14.194099028059554,
"children": {
"process_trajectory": {
"total": 340.1733329999877,
"count": 602144,
"is_parallel": true,
"self": 338.44719053598794,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7261424639997358,
"count": 4,
"is_parallel": true,
"self": 1.7261424639997358
}
}
},
"_update_policy": {
"total": 123.79444632900106,
"count": 45,
"is_parallel": true,
"self": 73.8598153110022,
"children": {
"TorchPPOOptimizer.update": {
"total": 49.93463101799887,
"count": 1080,
"is_parallel": true,
"self": 49.93463101799887
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23700753100001748,
"count": 1,
"self": 0.0011755620000712952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23583196899994618,
"count": 1,
"self": 0.23583196899994618
}
}
}
}
}
}
}