{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8773307800292969,
"min": 0.8773307800292969,
"max": 2.8595941066741943,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8367.103515625,
"min": 8367.103515625,
"max": 29285.103515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.767614364624023,
"min": 0.39160647988319397,
"max": 12.767614364624023,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2489.684814453125,
"min": 75.9716567993164,
"max": 2574.61669921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06988539598829697,
"min": 0.062160651088753884,
"max": 0.07095712043230698,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2795415839531879,
"min": 0.271966693359062,
"max": 0.353641130923814,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2097193920144848,
"min": 0.12508311309833445,
"max": 0.2855486908379723,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8388775680579392,
"min": 0.5003324523933378,
"max": 1.4277434541898615,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.136363636363637,
"min": 3.5,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1106.0,
"min": 154.0,
"max": 1368.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.136363636363637,
"min": 3.5,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1106.0,
"min": 154.0,
"max": 1368.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703173601",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703174061"
},
"total": 460.0158814860001,
"count": 1,
"self": 0.4348603980000689,
"children": {
"run_training.setup": {
"total": 0.05248940000001312,
"count": 1,
"self": 0.05248940000001312
},
"TrainerController.start_learning": {
"total": 459.52853168800004,
"count": 1,
"self": 0.6364396899821259,
"children": {
"TrainerController._reset_env": {
"total": 3.0075279349999846,
"count": 1,
"self": 3.0075279349999846
},
"TrainerController.advance": {
"total": 455.79287589301794,
"count": 18198,
"self": 0.2972847760189552,
"children": {
"env_step": {
"total": 455.495591116999,
"count": 18198,
"self": 301.204888548029,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.97574613598977,
"count": 18198,
"self": 1.5385604700010163,
"children": {
"TorchPolicy.evaluate": {
"total": 152.43718566598875,
"count": 18198,
"self": 152.43718566598875
}
}
},
"workers": {
"total": 0.3149564329802388,
"count": 18198,
"self": 0.0,
"children": {
"worker_root": {
"total": 458.16979431901086,
"count": 18198,
"is_parallel": true,
"self": 225.24488932899794,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005070950999993329,
"count": 1,
"is_parallel": true,
"self": 0.003806755000141493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001264195999851836,
"count": 10,
"is_parallel": true,
"self": 0.001264195999851836
}
}
},
"UnityEnvironment.step": {
"total": 0.03714684900000975,
"count": 1,
"is_parallel": true,
"self": 0.0006850840001106917,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004325649999827874,
"count": 1,
"is_parallel": true,
"self": 0.0004325649999827874
},
"communicator.exchange": {
"total": 0.033844806999923094,
"count": 1,
"is_parallel": true,
"self": 0.033844806999923094
},
"steps_from_proto": {
"total": 0.002184392999993179,
"count": 1,
"is_parallel": true,
"self": 0.000503265999782343,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016811270002108358,
"count": 10,
"is_parallel": true,
"self": 0.0016811270002108358
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 232.92490499001292,
"count": 18197,
"is_parallel": true,
"self": 10.931274219026932,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.638439904001643,
"count": 18197,
"is_parallel": true,
"self": 5.638439904001643
},
"communicator.exchange": {
"total": 181.1039369570052,
"count": 18197,
"is_parallel": true,
"self": 181.1039369570052
},
"steps_from_proto": {
"total": 35.25125390997914,
"count": 18197,
"is_parallel": true,
"self": 6.603404031001787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.647849878977354,
"count": 181970,
"is_parallel": true,
"self": 28.647849878977354
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001498159999755444,
"count": 1,
"self": 0.0001498159999755444,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 450.1196328269541,
"count": 689264,
"is_parallel": true,
"self": 14.5854843749172,
"children": {
"process_trajectory": {
"total": 248.01048369703744,
"count": 689264,
"is_parallel": true,
"self": 247.3092879080374,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7011957890000531,
"count": 4,
"is_parallel": true,
"self": 0.7011957890000531
}
}
},
"_update_policy": {
"total": 187.52366475499946,
"count": 90,
"is_parallel": true,
"self": 57.943368884997426,
"children": {
"TorchPPOOptimizer.update": {
"total": 129.58029587000203,
"count": 4587,
"is_parallel": true,
"self": 129.58029587000203
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09153835400002208,
"count": 1,
"self": 0.0009645400000408699,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09057381399998121,
"count": 1,
"self": 0.09057381399998121
}
}
}
}
}
}
}