{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0260181427001953,
"min": 1.0260181427001953,
"max": 2.8492064476013184,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9830.2802734375,
"min": 9830.2802734375,
"max": 29241.40625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.225529670715332,
"min": 0.2610379457473755,
"max": 12.225529670715332,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2383.978271484375,
"min": 50.641361236572266,
"max": 2460.5400390625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06649353756538226,
"min": 0.06079165873610799,
"max": 0.07567304706340199,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26597415026152904,
"min": 0.258229916199329,
"max": 0.37836523531700994,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20006445769731906,
"min": 0.11048052128970476,
"max": 0.29518860096440597,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8002578307892763,
"min": 0.44192208515881903,
"max": 1.394432828414674,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.568181818181817,
"min": 3.227272727272727,
"max": 24.272727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1037.0,
"min": 142.0,
"max": 1314.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.568181818181817,
"min": 3.227272727272727,
"max": 24.272727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1037.0,
"min": 142.0,
"max": 1314.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703821824",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703822410"
},
"total": 585.6417612540001,
"count": 1,
"self": 0.544675993000169,
"children": {
"run_training.setup": {
"total": 0.07149127499997121,
"count": 1,
"self": 0.07149127499997121
},
"TrainerController.start_learning": {
"total": 585.025593986,
"count": 1,
"self": 0.8422403949998625,
"children": {
"TrainerController._reset_env": {
"total": 4.400702438999929,
"count": 1,
"self": 4.400702438999929
},
"TrainerController.advance": {
"total": 579.6784358980001,
"count": 18203,
"self": 0.4465639099911414,
"children": {
"env_step": {
"total": 579.231871988009,
"count": 18203,
"self": 452.69884230801574,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.12675425400255,
"count": 18203,
"self": 2.454784062013914,
"children": {
"TorchPolicy.evaluate": {
"total": 123.67197019198863,
"count": 18203,
"self": 123.67197019198863
}
}
},
"workers": {
"total": 0.4062754259906569,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 582.8747366660185,
"count": 18203,
"is_parallel": true,
"self": 268.10960841701296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00725013199996738,
"count": 1,
"is_parallel": true,
"self": 0.0052265269999907105,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002023604999976669,
"count": 10,
"is_parallel": true,
"self": 0.002023604999976669
}
}
},
"UnityEnvironment.step": {
"total": 0.04675220399997215,
"count": 1,
"is_parallel": true,
"self": 0.0006987089999483942,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004565680000041539,
"count": 1,
"is_parallel": true,
"self": 0.0004565680000041539
},
"communicator.exchange": {
"total": 0.04345763000003444,
"count": 1,
"is_parallel": true,
"self": 0.04345763000003444
},
"steps_from_proto": {
"total": 0.002139296999985163,
"count": 1,
"is_parallel": true,
"self": 0.00042822300019906834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017110739997860946,
"count": 10,
"is_parallel": true,
"self": 0.0017110739997860946
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 314.76512824900556,
"count": 18202,
"is_parallel": true,
"self": 15.296865830998854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.564070324997147,
"count": 18202,
"is_parallel": true,
"self": 7.564070324997147
},
"communicator.exchange": {
"total": 247.02114556300796,
"count": 18202,
"is_parallel": true,
"self": 247.02114556300796
},
"steps_from_proto": {
"total": 44.88304653000159,
"count": 18202,
"is_parallel": true,
"self": 8.872871600964345,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.01017492903725,
"count": 182020,
"is_parallel": true,
"self": 36.01017492903725
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016321800012519816,
"count": 1,
"self": 0.00016321800012519816,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 572.5672028480466,
"count": 766446,
"is_parallel": true,
"self": 18.273120908029796,
"children": {
"process_trajectory": {
"total": 312.55070729501756,
"count": 766446,
"is_parallel": true,
"self": 311.78318865901747,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7675186360000907,
"count": 4,
"is_parallel": true,
"self": 0.7675186360000907
}
}
},
"_update_policy": {
"total": 241.74337464499922,
"count": 90,
"is_parallel": true,
"self": 71.16097788599848,
"children": {
"TorchPPOOptimizer.update": {
"total": 170.58239675900074,
"count": 4587,
"is_parallel": true,
"self": 170.58239675900074
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10405203599998458,
"count": 1,
"self": 0.0014987130000463367,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10255332299993825,
"count": 1,
"self": 0.10255332299993825
}
}
}
}
}
}
}